// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 * 	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means, that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with enabled local interrupts -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *  	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'slab_mutex'.
 *	The sem is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
 */
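
/*
 * Illustrative usage sketch (not part of this file's logic; "struct foo"
 * and "foo_cache" are made-up names): a typical client creates one cache
 * per object type and then allocates/frees individual objects from it:
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */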

#include	<linux/slab.h>
#include	<linux/mm.h>
#include	<linux/poison.h>
#include	<linux/swap.h>
#include	<linux/cache.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
#include	<linux/cpuset.h>
#include	<linux/proc_fs.h>
#include	<linux/seq_file.h>
#include	<linux/notifier.h>
#include	<linux/kallsyms.h>
#include	<linux/cpu.h>
#include	<linux/sysctl.h>
#include	<linux/module.h>
#include	<linux/rcupdate.h>
#include	<linux/string.h>
#include	<linux/uaccess.h>
#include	<linux/nodemask.h>
#include	<linux/kmemleak.h>
#include	<linux/mempolicy.h>
#include	<linux/mutex.h>
#include	<linux/fault-inject.h>
#include	<linux/rtmutex.h>
#include	<linux/reciprocal_div.h>
#include	<linux/debugobjects.h>
#include	<linux/memory.h>
#include	<linux/prefetch.h>
#include	<linux/sched/task_stack.h>

#include	<net/sock.h>

#include	<asm/cacheflush.h>
#include	<asm/tlbflush.h>
#include	<asm/page.h>

#include <trace/events/kmem.h>

#include	"internal.h"

#include	"slab.h"

/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)
#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)

#if FREELIST_BYTE_INDEX
typedef unsigned char freelist_idx_t;
#else
typedef unsigned short freelist_idx_t;
#endif

#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)
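
/*
 * Simple arithmetic illustration: with a one byte freelist_idx_t this is
 * (1 << 8) - 1 = 255, with a two byte freelist_idx_t it is
 * (1 << 16) - 1 = 65535 - the largest number of objects a single slab's
 * freelist can index.
 */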

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	void *entry[];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 */
};

struct alien_cache {
	spinlock_t lock;
	struct array_cache ac;
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (2 * MAX_NUMNODES)
static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_NODE (MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_cache_node *n, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node, struct list_head *list);
static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
static void cache_reap(struct work_struct *unused);

static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
						void **list);
static inline void fixup_slab_list(struct kmem_cache *cachep,
				struct kmem_cache_node *n, struct page *page,
				void **list);
static int slab_early_init = 1;

#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))

static void kmem_cache_node_init(struct kmem_cache_node *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->total_slabs = 0;
	parent->free_slabs = 0;
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&get_node(cachep, nodeid)->slab, listp);	\
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

#define CFLGS_OBJFREELIST_SLAB	((slab_flags_t __force)0x40000000U)
#define CFLGS_OFF_SLAB		((slab_flags_t __force)0x80000000U)
#define	OBJFREELIST_SLAB(x)	((x)->flags & CFLGS_OBJFREELIST_SLAB)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps means less probability for unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_AC		(2*HZ)
#define REAPTIMEOUT_NODE	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
#define	STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
#define	STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
#define	STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG

/*
 * memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 * 		the end of an object is aligned with the end of the real
 * 		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 * 		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->size - 1* BYTES_PER_WORD: last caller address
 *					[BYTES_PER_WORD long]
 */
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long long*) (objp + obj_offset(cachep) -
				      sizeof(unsigned long long));
}

static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long long *)(objp + cachep->size -
					      sizeof(unsigned long long) -
					      REDZONE_ALIGN);
	return (unsigned long long *) (objp + cachep->size -
				       sizeof(unsigned long long));
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

#ifdef CONFIG_DEBUG_SLAB_LEAK

static inline bool is_store_user_clean(struct kmem_cache *cachep)
{
	return atomic_read(&cachep->store_user_clean) == 1;
}

static inline void set_store_user_clean(struct kmem_cache *cachep)
{
	atomic_set(&cachep->store_user_clean, 1);
}

static inline void set_store_user_dirty(struct kmem_cache *cachep)
{
	if (is_store_user_clean(cachep))
		atomic_set(&cachep->store_user_clean, 0);
}

#else
static inline void set_store_user_dirty(struct kmem_cache *cachep) {}

#endif

/*
 * Do not go above this order unless 0 objects fit into the slab or
 * overridden on the command line.
 */
#define	SLAB_MAX_ORDER_HI	1
#define	SLAB_MAX_ORDER_LO	0
static int slab_max_order = SLAB_MAX_ORDER_LO;
static bool slab_max_order_set __initdata;

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_head_page(obj);
	return page->slab_cache;
}

static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
				 unsigned int idx)
{
	return page->s_mem + cache->size * idx;
}

#define BOOT_CPUCACHE_ENTRIES	1
/* internal cache of cache description objs */
static struct kmem_cache kmem_cache_boot = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.size = sizeof(struct kmem_cache),
	.name = "kmem_cache",
};

static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return this_cpu_ptr(cachep->cpu_cache);
}

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
		slab_flags_t flags, size_t *left_over)
{
	unsigned int num;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - @buffer_size bytes for each object
	 * - One freelist_idx_t for each object
	 *
	 * We don't need to consider alignment of freelist because
	 * freelist will be at the end of slab page. The objects will be
	 * at the correct alignment.
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) {
		num = slab_size / buffer_size;
		*left_over = slab_size % buffer_size;
	} else {
		num = slab_size / (buffer_size + sizeof(freelist_idx_t));
		*left_over = slab_size %
			(buffer_size + sizeof(freelist_idx_t));
	}

	return num;
}
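
/*
 * Worked example (illustrative numbers only): assuming 4096-byte pages and
 * a one byte freelist_idx_t, an order-0 slab of 128-byte objects with an
 * on-slab freelist gives num = 4096 / (128 + 1) = 31 objects and
 * *left_over = 4096 - 31 * 129 = 97 unused bytes.
 */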

#if DEBUG
#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
{
	pr_err("slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
#endif

/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line
  */

static int use_alien_caches __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
	use_alien_caches = 0;
	return 1;
}
__setup("noaliencache", noaliencache_setup);

static int __init slab_max_order_setup(char *str)
{
	get_option(&str, &slab_max_order);
	slab_max_order = slab_max_order < 0 ? 0 :
				min(slab_max_order, MAX_ORDER - 1);
	slab_max_order_set = true;

	return 1;
}
__setup("slab_max_order=", slab_max_order_setup);

#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, slab_reap_node);

static void init_reap_node(int cpu)
{
	per_cpu(slab_reap_node, cpu) = next_node_in(cpu_to_mem(cpu),
						    node_online_map);
}

static void next_reap_node(void)
{
	int node = __this_cpu_read(slab_reap_node);

	node = next_node_in(node, node_online_map);
	__this_cpu_write(slab_reap_node, node);
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);

	if (reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}

static void init_arraycache(struct array_cache *ac, int limit, int batch)
{
	/*
	 * The array_cache structures contain pointers to free object.
	 * However, when such objects are allocated or transferred to another
	 * cache the pointers are not cleared and they could be counted as
	 * valid references during a kmemleak scan. Therefore, kmemleak must
	 * not scan such objects.
	 */
	kmemleak_no_scan(ac);
	if (ac) {
		ac->avail = 0;
		ac->limit = limit;
		ac->batchcount = batch;
		ac->touched = 0;
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *ac = NULL;

	ac = kmalloc_node(memsize, gfp, node);
	init_arraycache(ac, entries, batchcount);
	return ac;
}

580 581
static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
					struct page *page, void *objp)
582
{
583 584 585
	struct kmem_cache_node *n;
	int page_node;
	LIST_HEAD(list);
586

587 588
	page_node = page_to_nid(page);
	n = get_node(cachep, page_node);
589

590 591 592
	spin_lock(&n->list_lock);
	free_block(cachep, &objp, 1, page_node, &list);
	spin_unlock(&n->list_lock);
593

594
	slabs_destroy(cachep, &list);
595 596
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min3(from->avail, max, to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail -nr,
			sizeof(void *) *nr);

	from->avail -= nr;
	to->avail += nr;
	return nr;
}
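
/*
 * Illustrative use (an assumption here, mirroring the refill path later in
 * this file): pull up to batchcount cache-warm objects from the node's
 * shared array into the current CPU's array_cache:
 *
 *	nr = transfer_objects(ac, n->shared, batchcount);
 */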

#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
623
#define reap_alien(cachep, n) do { } while (0)
624

J
Joonsoo Kim 已提交
625 626
static inline struct alien_cache **alloc_alien_cache(int node,
						int limit, gfp_t gfp)
627
{
628
	return NULL;
629 630
}

J
Joonsoo Kim 已提交
631
static inline void free_alien_cache(struct alien_cache **ac_ptr)
632 633 634 635 636 637 638 639 640 641 642 643 644 645
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

646
static inline void *____cache_alloc_node(struct kmem_cache *cachep,
647 648 649 650 651
		 gfp_t flags, int nodeid)
{
	return NULL;
}

D
David Rientjes 已提交
652 653
static inline gfp_t gfp_exact_node(gfp_t flags)
{
654
	return flags & ~__GFP_NOFAIL;
D
David Rientjes 已提交
655 656
}

657 658
#else	/* CONFIG_NUMA */

659
static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
660
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
661

J
Joonsoo Kim 已提交
662 663 664
static struct alien_cache *__alloc_alien_cache(int node, int entries,
						int batch, gfp_t gfp)
{
665
	size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
J
Joonsoo Kim 已提交
666 667 668 669
	struct alien_cache *alc = NULL;

	alc = kmalloc_node(memsize, gfp, node);
	init_arraycache(&alc->ac, entries, batch);
670
	spin_lock_init(&alc->lock);
J
Joonsoo Kim 已提交
671 672 673 674
	return alc;
}

static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
675
{
J
Joonsoo Kim 已提交
676
	struct alien_cache **alc_ptr;
677
	size_t memsize = sizeof(void *) * nr_node_ids;
678 679 680 681
	int i;

	if (limit > 1)
		limit = 12;
J
Joonsoo Kim 已提交
682 683 684 685 686 687 688 689 690 691 692 693 694
	alc_ptr = kzalloc_node(memsize, gfp, node);
	if (!alc_ptr)
		return NULL;

	for_each_node(i) {
		if (i == node || !node_online(i))
			continue;
		alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
		if (!alc_ptr[i]) {
			for (i--; i >= 0; i--)
				kfree(alc_ptr[i]);
			kfree(alc_ptr);
			return NULL;
695 696
		}
	}
J
Joonsoo Kim 已提交
697
	return alc_ptr;
698 699
}

J
Joonsoo Kim 已提交
700
static void free_alien_cache(struct alien_cache **alc_ptr)
701 702 703
{
	int i;

J
Joonsoo Kim 已提交
704
	if (!alc_ptr)
705 706
		return;
	for_each_node(i)
J
Joonsoo Kim 已提交
707 708
	    kfree(alc_ptr[i]);
	kfree(alc_ptr);
709 710
}

711
static void __drain_alien_cache(struct kmem_cache *cachep,
712 713
				struct array_cache *ac, int node,
				struct list_head *list)
714
{
715
	struct kmem_cache_node *n = get_node(cachep, node);
716 717

	if (ac->avail) {
718
		spin_lock(&n->list_lock);
719 720 721 722 723
		/*
		 * Stuff objects into the remote nodes shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
724 725
		if (n->shared)
			transfer_objects(n->shared, ac, ac->limit);
726

727
		free_block(cachep, ac->entry, ac->avail, node, list);
728
		ac->avail = 0;
729
		spin_unlock(&n->list_lock);
730 731 732
	}
}

733 734 735
/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
736
static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
737
{
738
	int node = __this_cpu_read(slab_reap_node);
739

740
	if (n->alien) {
J
Joonsoo Kim 已提交
741 742 743 744 745
		struct alien_cache *alc = n->alien[node];
		struct array_cache *ac;

		if (alc) {
			ac = &alc->ac;
746
			if (ac->avail && spin_trylock_irq(&alc->lock)) {
747 748 749
				LIST_HEAD(list);

				__drain_alien_cache(cachep, ac, node, &list);
750
				spin_unlock_irq(&alc->lock);
751
				slabs_destroy(cachep, &list);
J
Joonsoo Kim 已提交
752
			}
753 754 755 756
		}
	}
}

A
Andrew Morton 已提交
757
static void drain_alien_cache(struct kmem_cache *cachep,
J
Joonsoo Kim 已提交
758
				struct alien_cache **alien)
759
{
P
Pekka Enberg 已提交
760
	int i = 0;
J
Joonsoo Kim 已提交
761
	struct alien_cache *alc;
762 763 764 765
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
J
Joonsoo Kim 已提交
766 767
		alc = alien[i];
		if (alc) {
768 769
			LIST_HEAD(list);

J
Joonsoo Kim 已提交
770
			ac = &alc->ac;
771
			spin_lock_irqsave(&alc->lock, flags);
772
			__drain_alien_cache(cachep, ac, i, &list);
773
			spin_unlock_irqrestore(&alc->lock, flags);
774
			slabs_destroy(cachep, &list);
775 776 777
		}
	}
}
778

779 780
static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
				int node, int page_node)
781
{
782
	struct kmem_cache_node *n;
J
Joonsoo Kim 已提交
783 784
	struct alien_cache *alien = NULL;
	struct array_cache *ac;
785
	LIST_HEAD(list);
P
Pekka Enberg 已提交
786

787
	n = get_node(cachep, node);
788
	STATS_INC_NODEFREES(cachep);
789 790
	if (n->alien && n->alien[page_node]) {
		alien = n->alien[page_node];
J
Joonsoo Kim 已提交
791
		ac = &alien->ac;
792
		spin_lock(&alien->lock);
J
Joonsoo Kim 已提交
793
		if (unlikely(ac->avail == ac->limit)) {
794
			STATS_INC_ACOVERFLOW(cachep);
795
			__drain_alien_cache(cachep, ac, page_node, &list);
796
		}
797
		ac->entry[ac->avail++] = objp;
798
		spin_unlock(&alien->lock);
799
		slabs_destroy(cachep, &list);
800
	} else {
801
		n = get_node(cachep, page_node);
802
		spin_lock(&n->list_lock);
803
		free_block(cachep, &objp, 1, page_node, &list);
804
		spin_unlock(&n->list_lock);
805
		slabs_destroy(cachep, &list);
806 807 808
	}
	return 1;
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	int page_node = page_to_nid(virt_to_page(objp));
	int node = numa_mem_id();
	/*
	 * Make sure we are not freeing an object from another node to the array
	 * cache on this cpu.
	 */
	if (likely(node == page_node))
		return 0;

	return __cache_free_alien(cachep, objp, node, page_node);
}

/*
 * Construct gfp mask to allocate from a specific node but do not reclaim or
 * warn about failures.
 */
static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
}
#endif

834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873
static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
{
	struct kmem_cache_node *n;

	/*
	 * Set up the kmem_cache_node for cpu before we can
	 * begin anything. Make sure some other cpu on this
	 * node has not already allocated this
	 */
	n = get_node(cachep, node);
	if (n) {
		spin_lock_irq(&n->list_lock);
		n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
				cachep->num;
		spin_unlock_irq(&n->list_lock);

		return 0;
	}

	n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
	if (!n)
		return -ENOMEM;

	kmem_cache_node_init(n);
	n->next_reap = jiffies + REAPTIMEOUT_NODE +
		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;

	n->free_limit =
		(1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;

	/*
	 * The kmem_cache_nodes don't come and go as CPUs
	 * come and go.  slab_mutex is sufficient
	 * protection here.
	 */
	cachep->node[node] = n;

	return 0;
}

#if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP)
/*
 * Allocates and initializes node for a node on each slab cache, used for
 * either memory or cpu hotplug.  If memory is being hot-added, the kmem_cache_node
 * will be allocated off-node since memory is not yet online for the new node.
 * When hotplugging memory or a cpu, existing nodes are not replaced if
 * already in use.
 *
 * Must hold slab_mutex.
 */
static int init_cache_node_node(int node)
{
	int ret;
	struct kmem_cache *cachep;

	list_for_each_entry(cachep, &slab_caches, list) {
		ret = init_cache_node(cachep, node, GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}
#endif

899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947
static int setup_kmem_cache_node(struct kmem_cache *cachep,
				int node, gfp_t gfp, bool force_change)
{
	int ret = -ENOMEM;
	struct kmem_cache_node *n;
	struct array_cache *old_shared = NULL;
	struct array_cache *new_shared = NULL;
	struct alien_cache **new_alien = NULL;
	LIST_HEAD(list);

	if (use_alien_caches) {
		new_alien = alloc_alien_cache(node, cachep->limit, gfp);
		if (!new_alien)
			goto fail;
	}

	if (cachep->shared) {
		new_shared = alloc_arraycache(node,
			cachep->shared * cachep->batchcount, 0xbaadf00d, gfp);
		if (!new_shared)
			goto fail;
	}

	ret = init_cache_node(cachep, node, gfp);
	if (ret)
		goto fail;

	n = get_node(cachep, node);
	spin_lock_irq(&n->list_lock);
	if (n->shared && force_change) {
		free_block(cachep, n->shared->entry,
				n->shared->avail, node, &list);
		n->shared->avail = 0;
	}

	if (!n->shared || force_change) {
		old_shared = n->shared;
		n->shared = new_shared;
		new_shared = NULL;
	}

	if (!n->alien) {
		n->alien = new_alien;
		new_alien = NULL;
	}

	spin_unlock_irq(&n->list_lock);
	slabs_destroy(cachep, &list);

	/*
	 * To protect lockless access to n->shared during irq disabled context.
	 * If n->shared isn't NULL in irq disabled context, accessing it is
	 * guaranteed to be valid until irq is re-enabled, because it will be
	 * freed after synchronize_rcu().
	 */
	if (old_shared && force_change)
		synchronize_rcu();

fail:
	kfree(old_shared);
	kfree(new_shared);
	free_alien_cache(new_alien);

	return ret;
}

965 966
#ifdef CONFIG_SMP

967
static void cpuup_canceled(long cpu)
968 969
{
	struct kmem_cache *cachep;
970
	struct kmem_cache_node *n = NULL;
971
	int node = cpu_to_mem(cpu);
972
	const struct cpumask *mask = cpumask_of_node(node);
973

974
	list_for_each_entry(cachep, &slab_caches, list) {
975 976
		struct array_cache *nc;
		struct array_cache *shared;
J
Joonsoo Kim 已提交
977
		struct alien_cache **alien;
978
		LIST_HEAD(list);
979

980
		n = get_node(cachep, node);
981
		if (!n)
982
			continue;
983

984
		spin_lock_irq(&n->list_lock);
985

986 987
		/* Free limit for this kmem_cache_node */
		n->free_limit -= cachep->batchcount;
988 989 990 991

		/* cpu is dead; no one can alloc from it. */
		nc = per_cpu_ptr(cachep->cpu_cache, cpu);
		if (nc) {
992
			free_block(cachep, nc->entry, nc->avail, node, &list);
993 994
			nc->avail = 0;
		}
995

996
		if (!cpumask_empty(mask)) {
997
			spin_unlock_irq(&n->list_lock);
998
			goto free_slab;
999 1000
		}

1001
		shared = n->shared;
1002 1003
		if (shared) {
			free_block(cachep, shared->entry,
1004
				   shared->avail, node, &list);
1005
			n->shared = NULL;
1006 1007
		}

1008 1009
		alien = n->alien;
		n->alien = NULL;
1010

1011
		spin_unlock_irq(&n->list_lock);
1012 1013 1014 1015 1016 1017

		kfree(shared);
		if (alien) {
			drain_alien_cache(cachep, alien);
			free_alien_cache(alien);
		}
1018 1019

free_slab:
1020
		slabs_destroy(cachep, &list);
1021 1022 1023 1024 1025 1026
	}
	/*
	 * In the previous loop, all the objects were freed to
	 * the respective cache's slabs,  now we can go ahead and
	 * shrink each nodelist to its limit.
	 */
1027
	list_for_each_entry(cachep, &slab_caches, list) {
1028
		n = get_node(cachep, node);
1029
		if (!n)
1030
			continue;
1031
		drain_freelist(cachep, n, INT_MAX);
1032 1033 1034
	}
}

1035
static int cpuup_prepare(long cpu)
L
Linus Torvalds 已提交
1036
{
1037
	struct kmem_cache *cachep;
1038
	int node = cpu_to_mem(cpu);
1039
	int err;
L
Linus Torvalds 已提交
1040

1041 1042 1043 1044
	/*
	 * We need to do this right in the beginning since
	 * alloc_arraycache's are going to use this list.
	 * kmalloc_node allows us to add the slab to the right
1045
	 * kmem_cache_node and not this cpu's kmem_cache_node
1046
	 */
1047
	err = init_cache_node_node(node);
1048 1049
	if (err < 0)
		goto bad;
1050 1051 1052 1053 1054

	/*
	 * Now we can go ahead with allocating the shared arrays and
	 * array caches
	 */
1055
	list_for_each_entry(cachep, &slab_caches, list) {
1056 1057 1058
		err = setup_kmem_cache_node(cachep, node, GFP_KERNEL, false);
		if (err)
			goto bad;
1059
	}
1060

1061 1062
	return 0;
bad:
1063
	cpuup_canceled(cpu);
1064 1065 1066
	return -ENOMEM;
}

1067
int slab_prepare_cpu(unsigned int cpu)
1068
{
1069
	int err;
1070

1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093
	mutex_lock(&slab_mutex);
	err = cpuup_prepare(cpu);
	mutex_unlock(&slab_mutex);
	return err;
}

/*
 * This is called for a failed online attempt and for a successful
 * offline.
 *
 * Even if all the cpus of a node are down, we don't free the
 * kmem_list3 of any cache. This to avoid a race between cpu_down, and
 * a kmalloc allocation from another cpu for memory from the node of
 * the cpu going down.  The list3 structure is usually allocated from
 * kmem_cache_create() and gets destroyed at kmem_cache_destroy().
 */
int slab_dead_cpu(unsigned int cpu)
{
	mutex_lock(&slab_mutex);
	cpuup_canceled(cpu);
	mutex_unlock(&slab_mutex);
	return 0;
}
1094
#endif
1095 1096 1097 1098 1099

static int slab_online_cpu(unsigned int cpu)
{
	start_cpu_timer(cpu);
	return 0;
L
Linus Torvalds 已提交
1100 1101
}

1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114
static int slab_offline_cpu(unsigned int cpu)
{
	/*
	 * Shutdown cache reaper. Note that the slab_mutex is held so
	 * that if cache_reap() is invoked it cannot do anything
	 * expensive but will only modify reap_work and reschedule the
	 * timer.
	 */
	cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
	/* Now the cache_reaper is guaranteed to be not running. */
	per_cpu(slab_reap_work, cpu).work.func = NULL;
	return 0;
}
L
Linus Torvalds 已提交
1115

1116 1117 1118 1119 1120 1121
#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
/*
 * Drains freelist for a node on each slab cache, used for memory hot-remove.
 * Returns -EBUSY if all objects cannot be drained so that the node is not
 * removed.
 *
1122
 * Must hold slab_mutex.
1123
 */
1124
static int __meminit drain_cache_node_node(int node)
1125 1126 1127 1128
{
	struct kmem_cache *cachep;
	int ret = 0;

1129
	list_for_each_entry(cachep, &slab_caches, list) {
1130
		struct kmem_cache_node *n;
1131

1132
		n = get_node(cachep, node);
1133
		if (!n)
1134 1135
			continue;

1136
		drain_freelist(cachep, n, INT_MAX);
1137

1138 1139
		if (!list_empty(&n->slabs_full) ||
		    !list_empty(&n->slabs_partial)) {
1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}

static int __meminit slab_memory_callback(struct notifier_block *self,
					unsigned long action, void *arg)
{
	struct memory_notify *mnb = arg;
	int ret = 0;
	int nid;

	nid = mnb->status_change_nid;
	if (nid < 0)
		goto out;

	switch (action) {
	case MEM_GOING_ONLINE:
1160
		mutex_lock(&slab_mutex);
1161
		ret = init_cache_node_node(nid);
1162
		mutex_unlock(&slab_mutex);
1163 1164
		break;
	case MEM_GOING_OFFLINE:
1165
		mutex_lock(&slab_mutex);
1166
		ret = drain_cache_node_node(nid);
1167
		mutex_unlock(&slab_mutex);
1168 1169 1170 1171 1172 1173 1174 1175
		break;
	case MEM_ONLINE:
	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}
out:
1176
	return notifier_from_errno(ret);
1177 1178 1179
}
#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */

1180
/*
1181
 * swap the static kmem_cache_node with kmalloced memory
1182
 */
1183
static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
1184
				int nodeid)
1185
{
1186
	struct kmem_cache_node *ptr;
1187

1188
	ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
1189 1190
	BUG_ON(!ptr);

1191
	memcpy(ptr, list, sizeof(struct kmem_cache_node));
1192 1193 1194 1195 1196
	/*
	 * Do not assume that spinlocks can be initialized via memcpy:
	 */
	spin_lock_init(&ptr->list_lock);

1197
	MAKE_ALL_LISTS(cachep, ptr, nodeid);
1198
	cachep->node[nodeid] = ptr;
1199 1200
}

1201
/*
1202 1203
 * For setting up all the kmem_cache_node for cache whose buffer_size is same as
 * size of kmem_cache_node.
1204
 */
1205
static void __init set_up_node(struct kmem_cache *cachep, int index)
1206 1207 1208 1209
{
	int node;

	for_each_online_node(node) {
1210
		cachep->node[node] = &init_kmem_cache_node[index + node];
1211
		cachep->node[node]->next_reap = jiffies +
1212 1213
		    REAPTIMEOUT_NODE +
		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
1214 1215 1216
	}
}

/*
 * Initialisation.  Called after the page allocator has been initialised and
 * before smp_init().
 */
void __init kmem_cache_init(void)
{
1223 1224
	int i;

1225 1226
	kmem_cache = &kmem_cache_boot;

1227
	if (!IS_ENABLED(CONFIG_NUMA) || num_possible_nodes() == 1)
1228 1229
		use_alien_caches = 0;

C
Christoph Lameter 已提交
1230
	for (i = 0; i < NUM_INIT_LISTS; i++)
1231
		kmem_cache_node_init(&init_kmem_cache_node[i]);
C
Christoph Lameter 已提交
1232

L
Linus Torvalds 已提交
1233 1234
	/*
	 * Fragmentation resistance on low memory - only use bigger
1235 1236
	 * page orders on machines with more than 32MB of memory if
	 * not overridden on the command line.
L
Linus Torvalds 已提交
1237
	 */
1238
	if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
1239
		slab_max_order = SLAB_MAX_ORDER_HI;
L
Linus Torvalds 已提交
1240 1241 1242

	/* Bootstrap is tricky, because several objects are allocated
	 * from caches that do not exist yet:
	 * 1) initialize the kmem_cache cache: it contains the struct
	 *    kmem_cache structures of all caches, except kmem_cache itself:
	 *    kmem_cache is statically allocated.
	 *    Initially an __init data area is used for the head array and the
	 *    kmem_cache_node structures, it's replaced with a kmalloc allocated
	 *    array at the end of the bootstrap.
	 * 2) Create the first kmalloc cache.
	 *    The struct kmem_cache for the new cache is allocated normally.
	 *    An __init data area is used for the head array.
	 * 3) Create the remaining kmalloc caches, with minimally sized
	 *    head arrays.
	 * 4) Replace the __init data head arrays for kmem_cache and the first
	 *    kmalloc cache with kmalloc allocated arrays.
	 * 5) Replace the __init data for kmem_cache_node for kmem_cache and
	 *    the other caches with kmalloc allocated memory.
	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
	 */

1261
	/* 1) create the kmem_cache */
L
Linus Torvalds 已提交
1262

E
Eric Dumazet 已提交
1263
	/*
1264
	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
E
Eric Dumazet 已提交
1265
	 */
1266
	create_boot_cache(kmem_cache, "kmem_cache",
1267
		offsetof(struct kmem_cache, node) +
1268
				  nr_node_ids * sizeof(struct kmem_cache_node *),
1269
				  SLAB_HWCACHE_ALIGN, 0, 0);
1270
	list_add(&kmem_cache->list, &slab_caches);
1271
	memcg_link_cache(kmem_cache);
1272
	slab_state = PARTIAL;
L
Linus Torvalds 已提交
1273

A
Andrew Morton 已提交
1274
	/*
1275 1276
	 * Initialize the caches that provide memory for the  kmem_cache_node
	 * structures first.  Without this, further allocations will bug.
1277
	 */
1278
	kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE] = create_kmalloc_cache(
1279
				kmalloc_info[INDEX_NODE].name,
1280 1281
				kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS,
				0, kmalloc_size(INDEX_NODE));
1282
	slab_state = PARTIAL_NODE;
1283
	setup_kmalloc_cache_index_table();
1284

1285 1286
	slab_early_init = 0;

1287
	/* 5) Replace the bootstrap kmem_cache_node */
1288
	{
P
Pekka Enberg 已提交
1289 1290
		int nid;

1291
		for_each_online_node(nid) {
1292
			init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);
1293

1294
			init_list(kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE],
1295
					  &init_kmem_cache_node[SIZE_NODE + nid], nid);
1296 1297
		}
	}
L
Linus Torvalds 已提交
1298

1299
	create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
1300 1301 1302 1303 1304 1305 1306
}

void __init kmem_cache_init_late(void)
{
	struct kmem_cache *cachep;

	/* 6) resize the head arrays to their final sizes */
1307 1308
	mutex_lock(&slab_mutex);
	list_for_each_entry(cachep, &slab_caches, list)
1309 1310
		if (enable_cpucache(cachep, GFP_NOWAIT))
			BUG();
1311
	mutex_unlock(&slab_mutex);
1312

1313 1314 1315
	/* Done! */
	slab_state = FULL;

1316 1317 1318
#ifdef CONFIG_NUMA
	/*
	 * Register a memory hotplug callback that initializes and frees
1319
	 * node.
1320 1321 1322 1323
	 */
	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
#endif

A
Andrew Morton 已提交
1324 1325 1326
	/*
	 * The reap timers are started later, with a module init call: That part
	 * of the kernel is not yet operational.
L
Linus Torvalds 已提交
1327 1328 1329 1330 1331
	 */
}

static int __init cpucache_init(void)
{
1332
	int ret;
L
Linus Torvalds 已提交
1333

A
Andrew Morton 已提交
1334 1335
	/*
	 * Register the timers that return unneeded pages to the page allocator
L
Linus Torvalds 已提交
1336
	 */
1337 1338 1339
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SLAB online",
				slab_online_cpu, slab_offline_cpu);
	WARN_ON(ret < 0);
1340

L
Linus Torvalds 已提交
1341 1342 1343 1344
	return 0;
}
__initcall(cpucache_init);

1345 1346 1347
static noinline void
slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
{
1348
#if DEBUG
1349
	struct kmem_cache_node *n;
1350 1351
	unsigned long flags;
	int node;
1352 1353 1354 1355 1356
	static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs))
		return;
1357

1358 1359 1360
	pr_warn("SLAB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
		nodeid, gfpflags, &gfpflags);
	pr_warn("  cache: %s, object size: %d, order: %d\n",
1361
		cachep->name, cachep->size, cachep->gfporder);
1362

1363
	for_each_kmem_cache_node(cachep, node, n) {
1364
		unsigned long total_slabs, free_slabs, free_objs;
1365

1366
		spin_lock_irqsave(&n->list_lock, flags);
1367 1368 1369
		total_slabs = n->total_slabs;
		free_slabs = n->free_slabs;
		free_objs = n->free_objects;
1370
		spin_unlock_irqrestore(&n->list_lock, flags);
1371

1372 1373 1374 1375
		pr_warn("  node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
			node, total_slabs - free_slabs, total_slabs,
			(total_slabs * cachep->num) - free_objs,
			total_slabs * cachep->num);
1376
	}
1377
#endif
1378 1379
}

/*
 * Interface to system's page allocator. No need to hold the
 * kmem_cache_node ->list_lock.
 *
 * If we requested dmaable memory, we will get it. Even if we
 * did not request dmaable memory, we might get it, but that
 * would be relatively rare and ignorable.
 */
static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
								int nodeid)
L
Linus Torvalds 已提交
1390 1391
{
	struct page *page;
1392
	int nr_pages;
1393

1394
	flags |= cachep->allocflags;
1395

1396
	page = __alloc_pages_node(nodeid, flags, cachep->gfporder);
1397
	if (!page) {
1398
		slab_out_of_memory(cachep, flags, nodeid);
L
Linus Torvalds 已提交
1399
		return NULL;
1400
	}
L
Linus Torvalds 已提交
1401

1402 1403 1404 1405 1406
	if (memcg_charge_slab(page, flags, cachep->gfporder, cachep)) {
		__free_pages(page, cachep->gfporder);
		return NULL;
	}

1407
	nr_pages = (1 << cachep->gfporder);
L
Linus Torvalds 已提交
1408
	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1409
		mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, nr_pages);
1410
	else
1411
		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, nr_pages);
1412

1413
	__SetPageSlab(page);
1414 1415
	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
	if (sk_memalloc_socks() && page_is_pfmemalloc(page))
1416
		SetPageSlabPfmemalloc(page);
1417

1418
	return page;
L
Linus Torvalds 已提交
1419 1420 1421 1422 1423
}

/*
 * Interface to system's page release.
 */
1424
static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
L
Linus Torvalds 已提交
1425
{
1426 1427
	int order = cachep->gfporder;
	unsigned long nr_freed = (1 << order);
L
Linus Torvalds 已提交
1428

1429
	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1430
		mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, -nr_freed);
1431
	else
1432
		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, -nr_freed);
J
Joonsoo Kim 已提交
1433

1434
	BUG_ON(!PageSlab(page));
J
Joonsoo Kim 已提交
1435
	__ClearPageSlabPfmemalloc(page);
1436
	__ClearPageSlab(page);
1437 1438
	page_mapcount_reset(page);
	page->mapping = NULL;
G
Glauber Costa 已提交
1439

L
Linus Torvalds 已提交
1440 1441
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += nr_freed;
1442 1443
	memcg_uncharge_slab(page, order, cachep);
	__free_pages(page, order);
L
Linus Torvalds 已提交
1444 1445 1446 1447
}

static void kmem_rcu_free(struct rcu_head *head)
{
1448 1449
	struct kmem_cache *cachep;
	struct page *page;
L
Linus Torvalds 已提交
1450

1451 1452 1453 1454
	page = container_of(head, struct page, rcu_head);
	cachep = page->slab_cache;

	kmem_freepages(cachep, page);
L
Linus Torvalds 已提交
1455 1456 1457
}

#if DEBUG
1458 1459 1460 1461 1462 1463 1464 1465
static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
{
	if (debug_pagealloc_enabled() && OFF_SLAB(cachep) &&
		(cachep->size % PAGE_SIZE) == 0)
		return true;

	return false;
}
L
Linus Torvalds 已提交
1466 1467

#ifdef CONFIG_DEBUG_PAGEALLOC
1468
static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
P
Pekka Enberg 已提交
1469
			    unsigned long caller)
L
Linus Torvalds 已提交
1470
{
1471
	int size = cachep->object_size;
L
Linus Torvalds 已提交
1472

1473
	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
L
Linus Torvalds 已提交
1474

P
Pekka Enberg 已提交
1475
	if (size < 5 * sizeof(unsigned long))
L
Linus Torvalds 已提交
1476 1477
		return;

P
Pekka Enberg 已提交
1478 1479 1480 1481
	*addr++ = 0x12345678;
	*addr++ = caller;
	*addr++ = smp_processor_id();
	size -= 3 * sizeof(unsigned long);
L
Linus Torvalds 已提交
1482 1483 1484 1485 1486 1487 1488
	{
		unsigned long *sptr = &caller;
		unsigned long svalue;

		while (!kstack_end(sptr)) {
			svalue = *sptr++;
			if (kernel_text_address(svalue)) {
P
Pekka Enberg 已提交
1489
				*addr++ = svalue;
L
Linus Torvalds 已提交
1490 1491 1492 1493 1494 1495 1496
				size -= sizeof(unsigned long);
				if (size <= sizeof(unsigned long))
					break;
			}
		}

	}
P
Pekka Enberg 已提交
1497
	*addr++ = 0x87654321;
L
Linus Torvalds 已提交
1498
}
1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515

static void slab_kernel_map(struct kmem_cache *cachep, void *objp,
				int map, unsigned long caller)
{
	if (!is_debug_pagealloc_cache(cachep))
		return;

	if (caller)
		store_stackinfo(cachep, objp, caller);

	kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
}

#else
static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
				int map, unsigned long caller) {}

L
Linus Torvalds 已提交
1516 1517
#endif

1518
static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
L
Linus Torvalds 已提交
1519
{
1520
	int size = cachep->object_size;
1521
	addr = &((char *)addr)[obj_offset(cachep)];
L
Linus Torvalds 已提交
1522 1523

	memset(addr, val, size);
P
Pekka Enberg 已提交
1524
	*(unsigned char *)(addr + size - 1) = POISON_END;
L
Linus Torvalds 已提交
1525 1526 1527 1528 1529
}

static void dump_line(char *data, int offset, int limit)
{
	int i;
D
Dave Jones 已提交
1530 1531 1532
	unsigned char error = 0;
	int bad_count = 0;

1533
	pr_err("%03x: ", offset);
D
Dave Jones 已提交
1534 1535 1536 1537 1538 1539
	for (i = 0; i < limit; i++) {
		if (data[offset + i] != POISON_FREE) {
			error = data[offset + i];
			bad_count++;
		}
	}
1540 1541
	print_hex_dump(KERN_CONT, "", 0, 16, 1,
			&data[offset], limit, 1);
D
Dave Jones 已提交
1542 1543 1544 1545

	if (bad_count == 1) {
		error ^= POISON_FREE;
		if (!(error & (error - 1))) {
1546
			pr_err("Single bit error detected. Probably bad RAM.\n");
D
Dave Jones 已提交
1547
#ifdef CONFIG_X86
1548
			pr_err("Run memtest86+ or a similar memory test tool.\n");
D
Dave Jones 已提交
1549
#else
1550
			pr_err("Run a memory test tool.\n");
D
Dave Jones 已提交
1551 1552 1553
#endif
		}
	}
L
Linus Torvalds 已提交
1554 1555 1556 1557 1558
}
#endif

#if DEBUG

1559
static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
L
Linus Torvalds 已提交
1560 1561 1562 1563 1564
{
	int i, size;
	char *realobj;

	if (cachep->flags & SLAB_RED_ZONE) {
1565 1566 1567
		pr_err("Redzone: 0x%llx/0x%llx\n",
		       *dbg_redzone1(cachep, objp),
		       *dbg_redzone2(cachep, objp));
L
Linus Torvalds 已提交
1568 1569
	}

1570 1571
	if (cachep->flags & SLAB_STORE_USER)
		pr_err("Last user: (%pSR)\n", *dbg_userword(cachep, objp));
1572
	realobj = (char *)objp + obj_offset(cachep);
1573
	size = cachep->object_size;
P
Pekka Enberg 已提交
1574
	for (i = 0; i < size && lines; i += 16, lines--) {
L
Linus Torvalds 已提交
1575 1576
		int limit;
		limit = 16;
P
Pekka Enberg 已提交
1577 1578
		if (i + limit > size)
			limit = size - i;
L
Linus Torvalds 已提交
1579 1580 1581 1582
		dump_line(realobj, i, limit);
	}
}

1583
static void check_poison_obj(struct kmem_cache *cachep, void *objp)
L
Linus Torvalds 已提交
1584 1585 1586 1587 1588
{
	char *realobj;
	int size, i;
	int lines = 0;

1589 1590 1591
	if (is_debug_pagealloc_cache(cachep))
		return;

1592
	realobj = (char *)objp + obj_offset(cachep);
1593
	size = cachep->object_size;
L
Linus Torvalds 已提交
1594

P
Pekka Enberg 已提交
1595
	for (i = 0; i < size; i++) {
L
Linus Torvalds 已提交
1596
		char exp = POISON_FREE;
P
Pekka Enberg 已提交
1597
		if (i == size - 1)
L
Linus Torvalds 已提交
1598 1599 1600 1601 1602 1603
			exp = POISON_END;
		if (realobj[i] != exp) {
			int limit;
			/* Mismatch ! */
			/* Print header */
			if (lines == 0) {
1604
				pr_err("Slab corruption (%s): %s start=%px, len=%d\n",
1605 1606
				       print_tainted(), cachep->name,
				       realobj, size);
L
Linus Torvalds 已提交
1607 1608 1609
				print_objinfo(cachep, objp, 0);
			}
			/* Hexdump the affected line */
P
Pekka Enberg 已提交
1610
			i = (i / 16) * 16;
L
Linus Torvalds 已提交
1611
			limit = 16;
P
Pekka Enberg 已提交
1612 1613
			if (i + limit > size)
				limit = size - i;
L
Linus Torvalds 已提交
1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625
			dump_line(realobj, i, limit);
			i += 16;
			lines++;
			/* Limit to 5 lines */
			if (lines > 5)
				break;
		}
	}
	if (lines != 0) {
		/* Print some data about the neighboring objects, if they
		 * exist:
		 */
1626
		struct page *page = virt_to_head_page(objp);
1627
		unsigned int objnr;
L
Linus Torvalds 已提交
1628

1629
		objnr = obj_to_index(cachep, page, objp);
L
Linus Torvalds 已提交
1630
		if (objnr) {
1631
			objp = index_to_obj(cachep, page, objnr - 1);
1632
			realobj = (char *)objp + obj_offset(cachep);
1633
			pr_err("Prev obj: start=%px, len=%d\n", realobj, size);
L
Linus Torvalds 已提交
1634 1635
			print_objinfo(cachep, objp, 2);
		}
P
Pekka Enberg 已提交
1636
		if (objnr + 1 < cachep->num) {
1637
			objp = index_to_obj(cachep, page, objnr + 1);
1638
			realobj = (char *)objp + obj_offset(cachep);
1639
			pr_err("Next obj: start=%px, len=%d\n", realobj, size);
L
Linus Torvalds 已提交
1640 1641 1642 1643 1644 1645
			print_objinfo(cachep, objp, 2);
		}
	}
}
#endif

1646
#if DEBUG
1647 1648
static void slab_destroy_debugcheck(struct kmem_cache *cachep,
						struct page *page)
L
Linus Torvalds 已提交
1649 1650
{
	int i;
1651 1652 1653 1654 1655 1656

	if (OBJFREELIST_SLAB(cachep) && cachep->flags & SLAB_POISON) {
		poison_obj(cachep, page->freelist - obj_offset(cachep),
			POISON_FREE);
	}

L
Linus Torvalds 已提交
1657
	for (i = 0; i < cachep->num; i++) {
1658
		void *objp = index_to_obj(cachep, page, i);
L
Linus Torvalds 已提交
1659 1660 1661

		if (cachep->flags & SLAB_POISON) {
			check_poison_obj(cachep, objp);
1662
			slab_kernel_map(cachep, objp, 1, 0);
L
Linus Torvalds 已提交
1663 1664 1665
		}
		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
J
Joe Perches 已提交
1666
				slab_error(cachep, "start of a freed object was overwritten");
L
Linus Torvalds 已提交
1667
			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
J
Joe Perches 已提交
1668
				slab_error(cachep, "end of a freed object was overwritten");
L
Linus Torvalds 已提交
1669 1670
		}
	}
1671
}
L
Linus Torvalds 已提交
1672
#else
1673 1674
static void slab_destroy_debugcheck(struct kmem_cache *cachep,
						struct page *page)
1675 1676
{
}
L
Linus Torvalds 已提交
1677 1678
#endif

/**
 * slab_destroy - destroy and release all objects in a slab
 * @cachep: cache pointer being destroyed
 * @page: page pointer being destroyed
 *
 * Destroy all the objs in a slab page, and release the mem back to the system.
 * Before calling the slab page must have been unlinked from the cache. The
 * kmem_cache_node ->list_lock is not held/needed.
 */
static void slab_destroy(struct kmem_cache *cachep, struct page *page)
{
1690
	void *freelist;
1691

1692 1693
	freelist = page->freelist;
	slab_destroy_debugcheck(cachep, page);
1694
	if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU))
1695 1696
		call_rcu(&page->rcu_head, kmem_rcu_free);
	else
1697
		kmem_freepages(cachep, page);
1698 1699

	/*
1700
	 * From now on, we don't use freelist
1701 1702 1703
	 * although actual page can be freed in rcu context
	 */
	if (OFF_SLAB(cachep))
1704
		kmem_cache_free(cachep->freelist_cache, freelist);
L
Linus Torvalds 已提交
1705 1706
}

static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
{
	struct page *page, *n;

	list_for_each_entry_safe(page, n, list, lru) {
		list_del(&page->lru);
		slab_destroy(cachep, page);
	}
}

/**
 * calculate_slab_order - calculate size (page order) of slabs
 * @cachep: pointer to the cache that is being created
 * @size: size of objects to be created in this cache.
 * @flags: slab allocation flags
 *
 * Also calculates the number of objects per slab.
 *
 * This could be made much more intelligent.  For now, try to avoid using
 * high order pages for slabs.  When the gfp() functions are more friendly
 * towards high-order requests, this should be changed.
 */
static size_t calculate_slab_order(struct kmem_cache *cachep,
				size_t size, slab_flags_t flags)
{
	size_t left_over = 0;
	int gfporder;

	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
		unsigned int num;
		size_t remainder;

		num = cache_estimate(gfporder, size, flags, &remainder);
		if (!num)
			continue;

		/* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */
		if (num > SLAB_OBJ_MAX_NUM)
			break;

		if (flags & CFLGS_OFF_SLAB) {
			struct kmem_cache *freelist_cache;
			size_t freelist_size;

			freelist_size = num * sizeof(freelist_idx_t);
			freelist_cache = kmalloc_slab(freelist_size, 0u);
			if (!freelist_cache)
				continue;

			/*
			 * Needed to avoid possible looping condition
			 * in cache_grow_begin()
			 */
			if (OFF_SLAB(freelist_cache))
				continue;

			/* check if off slab has enough benefit */
			if (freelist_cache->size > cachep->size / 2)
				continue;
		}

		/* Found something acceptable - save it away */
		cachep->num = num;
		cachep->gfporder = gfporder;
		left_over = remainder;

		/*
		 * A VFS-reclaimable slab tends to have most allocations
		 * as GFP_NOFS and we really don't want to have to be allocating
		 * higher-order pages when we are unable to shrink dcache.
		 */
		if (flags & SLAB_RECLAIM_ACCOUNT)
			break;

		/*
		 * Large number of objects is good, but very large slabs are
		 * currently bad for the gfp()s.
		 */
		if (gfporder >= slab_max_order)
			break;

		/*
		 * Acceptable internal fragmentation?
		 */
		if (left_over * 8 <= (PAGE_SIZE << gfporder))
			break;
	}
	return left_over;
}
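
/*
 * Rough worked example for the loop above (illustration only, not taken
 * from this file, and ignoring the per-object freelist overhead that
 * cache_estimate() also charges): with PAGE_SIZE == 4096 and 700-byte
 * objects, order 0 fits 5 objects with 596 bytes left over; 596 * 8 > 4096,
 * so the fragmentation check fails and order 1 is tried, which fits 11
 * objects with 492 bytes left over and is accepted.
 */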

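/*
 * Allocate the per-cpu array_caches for @cachep and initialise each one
 * with the given @entries limit and @batchcount.
 */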
static struct array_cache __percpu *alloc_kmem_cache_cpus(
		struct kmem_cache *cachep, int entries, int batchcount)
{
	int cpu;
	size_t size;
	struct array_cache __percpu *cpu_cache;

	size = sizeof(void *) * entries + sizeof(struct array_cache);
1805
	cpu_cache = __alloc_percpu(size, sizeof(void *));
1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817

	if (!cpu_cache)
		return NULL;

	for_each_possible_cpu(cpu) {
		init_arraycache(per_cpu_ptr(cpu_cache, cpu),
				entries, batchcount);
	}

	return cpu_cache;
}

1818
static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
1819
{
1820
	if (slab_state >= FULL)
1821
		return enable_cpucache(cachep, gfp);
1822

1823 1824 1825 1826
	cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1);
	if (!cachep->cpu_cache)
		return 1;

1827
	if (slab_state == DOWN) {
1828 1829
		/* Creation of first cache (kmem_cache). */
		set_up_node(kmem_cache, CACHE_CACHE);
1830
	} else if (slab_state == PARTIAL) {
1831 1832
		/* For kmem_cache_node */
		set_up_node(cachep, SIZE_NODE);
1833
	} else {
1834
		int node;
1835

1836 1837 1838 1839 1840
		for_each_online_node(node) {
			cachep->node[node] = kmalloc_node(
				sizeof(struct kmem_cache_node), gfp, node);
			BUG_ON(!cachep->node[node]);
			kmem_cache_node_init(cachep->node[node]);
1841 1842
		}
	}
1843

1844
	cachep->node[numa_mem_id()]->next_reap =
1845 1846
			jiffies + REAPTIMEOUT_NODE +
			((unsigned long)cachep) % REAPTIMEOUT_NODE;
1847 1848 1849 1850 1851 1852 1853

	cpu_cache_get(cachep)->avail = 0;
	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
	cpu_cache_get(cachep)->batchcount = 1;
	cpu_cache_get(cachep)->touched = 0;
	cachep->batchcount = 1;
	cachep->limit = BOOT_CPUCACHE_ENTRIES;
1854
	return 0;
1855 1856
}

1857
slab_flags_t kmem_cache_flags(unsigned int object_size,
1858
	slab_flags_t flags, const char *name,
J
Joonsoo Kim 已提交
1859 1860 1861 1862 1863 1864
	void (*ctor)(void *))
{
	return flags;
}

struct kmem_cache *
1865
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
1866
		   slab_flags_t flags, void (*ctor)(void *))
J
Joonsoo Kim 已提交
1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882
{
	struct kmem_cache *cachep;

	cachep = find_mergeable(size, align, flags, name, ctor);
	if (cachep) {
		cachep->refcount++;

		/*
		 * Adjust the object sizes so that we clear
		 * the complete object on kzalloc.
		 */
		cachep->object_size = max_t(int, cachep->object_size, size);
	}
	return cachep;
}

1883
static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
1884
			size_t size, slab_flags_t flags)
1885 1886 1887 1888 1889
{
	size_t left;

	cachep->num = 0;

1890
	if (cachep->ctor || flags & SLAB_TYPESAFE_BY_RCU)
1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905
		return false;

	left = calculate_slab_order(cachep, size,
			flags | CFLGS_OBJFREELIST_SLAB);
	if (!cachep->num)
		return false;

	if (cachep->num * sizeof(freelist_idx_t) > cachep->object_size)
		return false;

	cachep->colour = left / cachep->colour_off;

	return true;
}

1906
static bool set_off_slab_cache(struct kmem_cache *cachep,
1907
			size_t size, slab_flags_t flags)
1908 1909 1910 1911 1912 1913
{
	size_t left;

	cachep->num = 0;

	/*
1914 1915
	 * Always use on-slab management when SLAB_NOLEAKTRACE
	 * to avoid recursive calls into kmemleak.
1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940
	 */
	if (flags & SLAB_NOLEAKTRACE)
		return false;

	/*
	 * Size is large, assume best to place the slab management obj
	 * off-slab (should allow better packing of objs).
	 */
	left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB);
	if (!cachep->num)
		return false;

	/*
	 * If the slab has been placed off-slab, and we have enough space then
	 * move it on-slab. This is at the expense of any extra colouring.
	 */
	if (left >= cachep->num * sizeof(freelist_idx_t))
		return false;

	cachep->colour = left / cachep->colour_off;

	return true;
}

static bool set_on_slab_cache(struct kmem_cache *cachep,
1941
			size_t size, slab_flags_t flags)
1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955
{
	size_t left;

	cachep->num = 0;

	left = calculate_slab_order(cachep, size, flags);
	if (!cachep->num)
		return false;

	cachep->colour = left / cachep->colour_off;

	return true;
}

L
Linus Torvalds 已提交
1956
/**
1957
 * __kmem_cache_create - Create a cache.
R
Randy Dunlap 已提交
1958
 * @cachep: cache management descriptor
L
Linus Torvalds 已提交
1959 1960 1961 1962
 * @flags: SLAB flags
 *
 * Returns zero on success, nonzero on failure.
 * Cannot be called within an interrupt, but can be interrupted.
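 *
 * Example of a typical caller (hypothetical names, for illustration only;
 * creation normally goes through the kmem_cache_create() wrapper):
 *
 *	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
 *				      0, SLAB_HWCACHE_ALIGN, NULL);
 *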
1963
 * The @ctor is run when new pages are allocated by the cache.
L
Linus Torvalds 已提交
1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 */
1977
int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
L
Linus Torvalds 已提交
1978
{
1979
	size_t ralign = BYTES_PER_WORD;
1980
	gfp_t gfp;
1981
	int err;
1982
	unsigned int size = cachep->size;
L
Linus Torvalds 已提交
1983 1984 1985 1986 1987 1988 1989 1990 1991

#if DEBUG
#if FORCED_DEBUG
	/*
	 * Enable redzoning and last user accounting, except for caches with
	 * large objects, if the increased size would increase the object size
	 * above the next power of two: caches with object sizes just above a
	 * power of two have a significant amount of internal fragmentation.
	 */
D
David Woodhouse 已提交
1992 1993
	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
						2 * sizeof(unsigned long long)))
P
Pekka Enberg 已提交
1994
		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
1995
	if (!(flags & SLAB_TYPESAFE_BY_RCU))
L
Linus Torvalds 已提交
1996 1997 1998 1999
		flags |= SLAB_POISON;
#endif
#endif

A
Andrew Morton 已提交
2000 2001
	/*
	 * Check that size is in terms of words.  This is needed to avoid
L
Linus Torvalds 已提交
2002 2003 2004
	 * unaligned accesses for some archs when redzoning is used, and makes
	 * sure any on-slab bufctl's are also correctly aligned.
	 */
2005
	size = ALIGN(size, BYTES_PER_WORD);
L
Linus Torvalds 已提交
2006

D
David Woodhouse 已提交
2007 2008 2009 2010
	if (flags & SLAB_RED_ZONE) {
		ralign = REDZONE_ALIGN;
		/* If redzoning, ensure that the second redzone is suitably
		 * aligned, by adjusting the object size accordingly. */
2011
		size = ALIGN(size, REDZONE_ALIGN);
D
David Woodhouse 已提交
2012
	}
2013

2014
	/* 3) caller mandated alignment */
2015 2016
	if (ralign < cachep->align) {
		ralign = cachep->align;
L
Linus Torvalds 已提交
2017
	}
2018 2019
	/* disable debug if necessary */
	if (ralign > __alignof__(unsigned long long))
2020
		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
A
Andrew Morton 已提交
2021
	/*
2022
	 * 4) Store it.
L
Linus Torvalds 已提交
2023
	 */
2024
	cachep->align = ralign;
2025 2026 2027 2028
	cachep->colour_off = cache_line_size();
	/* Offset must be a multiple of the alignment. */
	if (cachep->colour_off < cachep->align)
		cachep->colour_off = cachep->align;
L
Linus Torvalds 已提交
2029

2030 2031 2032 2033 2034
	if (slab_is_available())
		gfp = GFP_KERNEL;
	else
		gfp = GFP_NOWAIT;

L
Linus Torvalds 已提交
2035 2036
#if DEBUG

2037 2038 2039 2040
	/*
	 * Both debugging options require word-alignment which is calculated
	 * into align above.
	 */
L
Linus Torvalds 已提交
2041 2042
	if (flags & SLAB_RED_ZONE) {
		/* add space for red zone words */
2043 2044
		cachep->obj_offset += sizeof(unsigned long long);
		size += 2 * sizeof(unsigned long long);
L
Linus Torvalds 已提交
2045 2046
	}
	if (flags & SLAB_STORE_USER) {
2047
		/* user store requires one word storage behind the end of
D
David Woodhouse 已提交
2048 2049
		 * the real object. But if the second red zone needs to be
		 * aligned to 64 bits, we must allow that much space.
L
Linus Torvalds 已提交
2050
		 */
D
David Woodhouse 已提交
2051 2052 2053 2054
		if (flags & SLAB_RED_ZONE)
			size += REDZONE_ALIGN;
		else
			size += BYTES_PER_WORD;
L
Linus Torvalds 已提交
2055
	}
2056 2057
#endif

A
Alexander Potapenko 已提交
2058 2059
	kasan_cache_create(cachep, &size, &flags);

2060 2061 2062 2063 2064 2065 2066 2067 2068
	size = ALIGN(size, cachep->align);
	/*
	 * We should restrict the number of objects in a slab to implement
	 * byte sized index. Refer comment on SLAB_OBJ_MIN_SIZE definition.
	 */
	if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
		size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);

#if DEBUG
2069 2070 2071 2072 2073 2074 2075
	/*
	 * To activate debug pagealloc, off-slab management is necessary
	 * requirement. In early phase of initialization, small sized slab
	 * doesn't get initialized so it would not be possible. So, we need
	 * to check size >= 256. It guarantees that all necessary small
	 * sized slab is initialized in current slab initialization sequence.
	 */
2076
	if (debug_pagealloc_enabled() && (flags & SLAB_POISON) &&
2077 2078 2079 2080 2081 2082 2083 2084 2085 2086 2087
		size >= 256 && cachep->object_size > cache_line_size()) {
		if (size < PAGE_SIZE || size % PAGE_SIZE == 0) {
			size_t tmp_size = ALIGN(size, PAGE_SIZE);

			if (set_off_slab_cache(cachep, tmp_size, flags)) {
				flags |= CFLGS_OFF_SLAB;
				cachep->obj_offset += tmp_size - size;
				size = tmp_size;
				goto done;
			}
		}
L
Linus Torvalds 已提交
2088 2089 2090
	}
#endif

2091 2092 2093 2094 2095
	if (set_objfreelist_slab_cache(cachep, size, flags)) {
		flags |= CFLGS_OBJFREELIST_SLAB;
		goto done;
	}

2096
	if (set_off_slab_cache(cachep, size, flags)) {
L
Linus Torvalds 已提交
2097
		flags |= CFLGS_OFF_SLAB;
2098
		goto done;
2099
	}
L
Linus Torvalds 已提交
2100

2101 2102
	if (set_on_slab_cache(cachep, size, flags))
		goto done;
L
Linus Torvalds 已提交
2103

2104
	return -E2BIG;
L
Linus Torvalds 已提交
2105

2106 2107
done:
	cachep->freelist_size = cachep->num * sizeof(freelist_idx_t);
L
Linus Torvalds 已提交
2108
	cachep->flags = flags;
2109
	cachep->allocflags = __GFP_COMP;
Y
Yang Shi 已提交
2110
	if (flags & SLAB_CACHE_DMA)
2111
		cachep->allocflags |= GFP_DMA;
2112 2113
	if (flags & SLAB_RECLAIM_ACCOUNT)
		cachep->allocflags |= __GFP_RECLAIMABLE;
2114
	cachep->size = size;
2115
	cachep->reciprocal_buffer_size = reciprocal_value(size);
L
Linus Torvalds 已提交
2116

2117 2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129
#if DEBUG
	/*
	 * If we're going to use the generic kernel_map_pages()
	 * poisoning, then it's going to smash the contents of
	 * the redzone and userword anyhow, so switch them off.
	 */
	if (IS_ENABLED(CONFIG_PAGE_POISONING) &&
		(cachep->flags & SLAB_POISON) &&
		is_debug_pagealloc_cache(cachep))
		cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
#endif

	if (OFF_SLAB(cachep)) {
2130 2131
		cachep->freelist_cache =
			kmalloc_slab(cachep->freelist_size, 0u);
2132
	}
L
Linus Torvalds 已提交
2133

2134 2135
	err = setup_cpu_cache(cachep, gfp);
	if (err) {
2136
		__kmem_cache_release(cachep);
2137
		return err;
2138
	}
L
Linus Torvalds 已提交
2139

2140
	return 0;
L
Linus Torvalds 已提交
2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153
}

#if DEBUG
static void check_irq_off(void)
{
	BUG_ON(!irqs_disabled());
}

static void check_irq_on(void)
{
	BUG_ON(irqs_disabled());
}

2154 2155 2156 2157 2158
static void check_mutex_acquired(void)
{
	BUG_ON(!mutex_is_locked(&slab_mutex));
}

2159
static void check_spinlock_acquired(struct kmem_cache *cachep)
L
Linus Torvalds 已提交
2160 2161 2162
{
#ifdef CONFIG_SMP
	check_irq_off();
2163
	assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
L
Linus Torvalds 已提交
2164 2165
#endif
}
2166

2167
static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2168 2169 2170
{
#ifdef CONFIG_SMP
	check_irq_off();
2171
	assert_spin_locked(&get_node(cachep, node)->list_lock);
2172 2173 2174
#endif
}

L
Linus Torvalds 已提交
2175 2176 2177
#else
#define check_irq_off()	do { } while(0)
#define check_irq_on()	do { } while(0)
2178
#define check_mutex_acquired()	do { } while(0)
L
Linus Torvalds 已提交
2179
#define check_spinlock_acquired(x) do { } while(0)
2180
#define check_spinlock_acquired_node(x, y) do { } while(0)
L
Linus Torvalds 已提交
2181 2182
#endif

2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198
static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
				int node, bool free_all, struct list_head *list)
{
	int tofree;

	if (!ac || !ac->avail)
		return;

	tofree = free_all ? ac->avail : (ac->limit + 4) / 5;
	if (tofree > ac->avail)
		tofree = (ac->avail + 1) / 2;

	free_block(cachep, ac->entry, tofree, node, list);
	ac->avail -= tofree;
	memmove(ac->entry, &(ac->entry[tofree]), sizeof(void *) * ac->avail);
}
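
/*
 * do_drain() below runs on each CPU via on_each_cpu() with interrupts
 * disabled; it flushes that CPU's array_cache back to the node's slab
 * lists and destroys any slabs that became completely free.
 */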
2199

L
Linus Torvalds 已提交
2200 2201
static void do_drain(void *arg)
{
A
Andrew Morton 已提交
2202
	struct kmem_cache *cachep = arg;
L
Linus Torvalds 已提交
2203
	struct array_cache *ac;
2204
	int node = numa_mem_id();
2205
	struct kmem_cache_node *n;
2206
	LIST_HEAD(list);
L
Linus Torvalds 已提交
2207 2208

	check_irq_off();
2209
	ac = cpu_cache_get(cachep);
2210 2211
	n = get_node(cachep, node);
	spin_lock(&n->list_lock);
2212
	free_block(cachep, ac->entry, ac->avail, node, &list);
2213
	spin_unlock(&n->list_lock);
2214
	slabs_destroy(cachep, &list);
L
Linus Torvalds 已提交
2215 2216 2217
	ac->avail = 0;
}

2218
static void drain_cpu_caches(struct kmem_cache *cachep)
L
Linus Torvalds 已提交
2219
{
2220
	struct kmem_cache_node *n;
2221
	int node;
2222
	LIST_HEAD(list);
2223

2224
	on_each_cpu(do_drain, cachep, 1);
L
Linus Torvalds 已提交
2225
	check_irq_on();
2226 2227
	for_each_kmem_cache_node(cachep, node, n)
		if (n->alien)
2228
			drain_alien_cache(cachep, n->alien);
2229

2230 2231 2232 2233 2234 2235 2236
	for_each_kmem_cache_node(cachep, node, n) {
		spin_lock_irq(&n->list_lock);
		drain_array_locked(cachep, n->shared, node, true, &list);
		spin_unlock_irq(&n->list_lock);

		slabs_destroy(cachep, &list);
	}
L
Linus Torvalds 已提交
2237 2238
}

2239 2240 2241 2242 2243 2244 2245
/*
 * Remove slabs from the list of free slabs.
 * Specify the number of slabs to drain in tofree.
 *
 * Returns the actual number of slabs released.
 */
static int drain_freelist(struct kmem_cache *cache,
2246
			struct kmem_cache_node *n, int tofree)
L
Linus Torvalds 已提交
2247
{
2248 2249
	struct list_head *p;
	int nr_freed;
2250
	struct page *page;
L
Linus Torvalds 已提交
2251

2252
	nr_freed = 0;
2253
	while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
L
Linus Torvalds 已提交
2254

2255 2256 2257 2258
		spin_lock_irq(&n->list_lock);
		p = n->slabs_free.prev;
		if (p == &n->slabs_free) {
			spin_unlock_irq(&n->list_lock);
2259 2260
			goto out;
		}
L
Linus Torvalds 已提交
2261

2262 2263
		page = list_entry(p, struct page, lru);
		list_del(&page->lru);
2264
		n->free_slabs--;
2265
		n->total_slabs--;
2266 2267 2268 2269
		/*
		 * Safe to drop the lock. The slab is no longer linked
		 * to the cache.
		 */
2270 2271
		n->free_objects -= cache->num;
		spin_unlock_irq(&n->list_lock);
2272
		slab_destroy(cache, page);
2273
		nr_freed++;
L
Linus Torvalds 已提交
2274
	}
2275 2276
out:
	return nr_freed;
L
Linus Torvalds 已提交
2277 2278
}
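
/*
 * __kmem_cache_empty() returns true when no slab in this cache holds an
 * allocated object, i.e. every node's full and partial lists are empty.
 */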

2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290
bool __kmem_cache_empty(struct kmem_cache *s)
{
	int node;
	struct kmem_cache_node *n;

	for_each_kmem_cache_node(s, node, n)
		if (!list_empty(&n->slabs_full) ||
		    !list_empty(&n->slabs_partial))
			return false;
	return true;
}

2291
int __kmem_cache_shrink(struct kmem_cache *cachep)
2292
{
2293 2294
	int ret = 0;
	int node;
2295
	struct kmem_cache_node *n;
2296 2297 2298 2299

	drain_cpu_caches(cachep);

	check_irq_on();
2300
	for_each_kmem_cache_node(cachep, node, n) {
2301
		drain_freelist(cachep, n, INT_MAX);
2302

2303 2304
		ret += !list_empty(&n->slabs_full) ||
			!list_empty(&n->slabs_partial);
2305 2306 2307 2308
	}
	return (ret ? 1 : 0);
}

2309 2310 2311 2312 2313 2314 2315
#ifdef CONFIG_MEMCG
void __kmemcg_cache_deactivate(struct kmem_cache *cachep)
{
	__kmem_cache_shrink(cachep);
}
#endif

2316
int __kmem_cache_shutdown(struct kmem_cache *cachep)
2317
{
2318
	return __kmem_cache_shrink(cachep);
2319 2320 2321
}

void __kmem_cache_release(struct kmem_cache *cachep)
L
Linus Torvalds 已提交
2322
{
2323
	int i;
2324
	struct kmem_cache_node *n;
L
Linus Torvalds 已提交
2325

T
Thomas Garnier 已提交
2326 2327
	cache_random_seq_destroy(cachep);

2328
	free_percpu(cachep->cpu_cache);
L
Linus Torvalds 已提交
2329

2330
	/* NUMA: free the node structures */
2331 2332 2333 2334 2335
	for_each_kmem_cache_node(cachep, i, n) {
		kfree(n->shared);
		free_alien_cache(n->alien);
		kfree(n);
		cachep->node[i] = NULL;
2336
	}
L
Linus Torvalds 已提交
2337 2338
}

2339 2340
/*
 * Get the memory for a slab management obj.
 *
 * For a slab cache when the slab descriptor is off-slab, the slab
 * descriptor can't come from the same cache which is being created,
 * because that would mean deferring the creation of the kmalloc_{dma,}_cache
 * of size sizeof(slab descriptor) to this point. We would eventually call
 * down to __kmem_cache_create(), which in turn looks up in the
 * kmalloc_{dma,}_caches for the desired-size one.
 * This is a "chicken-and-egg" problem.
 *
 * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
 * which are all initialized during kmem_cache_init().
 */
2353
static void *alloc_slabmgmt(struct kmem_cache *cachep,
2354 2355
				   struct page *page, int colour_off,
				   gfp_t local_flags, int nodeid)
L
Linus Torvalds 已提交
2356
{
2357
	void *freelist;
2358
	void *addr = page_address(page);
P
Pekka Enberg 已提交
2359

2360 2361 2362
	page->s_mem = addr + colour_off;
	page->active = 0;

2363 2364 2365
	if (OBJFREELIST_SLAB(cachep))
		freelist = NULL;
	else if (OFF_SLAB(cachep)) {
L
Linus Torvalds 已提交
2366
		/* Slab management obj is off-slab. */
2367
		freelist = kmem_cache_alloc_node(cachep->freelist_cache,
2368
					      local_flags, nodeid);
2369
		if (!freelist)
L
Linus Torvalds 已提交
2370 2371
			return NULL;
	} else {
2372 2373 2374
		/* We will use last bytes at the slab for freelist */
		freelist = addr + (PAGE_SIZE << cachep->gfporder) -
				cachep->freelist_size;
L
Linus Torvalds 已提交
2375
	}
2376

2377
	return freelist;
L
Linus Torvalds 已提交
2378 2379
}

2380
static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
L
Linus Torvalds 已提交
2381
{
2382
	return ((freelist_idx_t *)page->freelist)[idx];
2383 2384 2385
}

static inline void set_free_obj(struct page *page,
2386
					unsigned int idx, freelist_idx_t val)
2387
{
2388
	((freelist_idx_t *)(page->freelist))[idx] = val;
L
Linus Torvalds 已提交
2389 2390
}

2391
static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
L
Linus Torvalds 已提交
2392
{
2393
#if DEBUG
L
Linus Torvalds 已提交
2394 2395 2396
	int i;

	for (i = 0; i < cachep->num; i++) {
2397
		void *objp = index_to_obj(cachep, page, i);
2398

L
Linus Torvalds 已提交
2399 2400 2401 2402 2403 2404 2405 2406
		if (cachep->flags & SLAB_STORE_USER)
			*dbg_userword(cachep, objp) = NULL;

		if (cachep->flags & SLAB_RED_ZONE) {
			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
		}
		/*
A
Andrew Morton 已提交
2407 2408 2409
		 * Constructors are not allowed to allocate memory from the same
		 * cache which they are a constructor for.  Otherwise, deadlock.
		 * They must also be threaded.
L
Linus Torvalds 已提交
2410
		 */
A
Alexander Potapenko 已提交
2411 2412 2413
		if (cachep->ctor && !(cachep->flags & SLAB_POISON)) {
			kasan_unpoison_object_data(cachep,
						   objp + obj_offset(cachep));
2414
			cachep->ctor(objp + obj_offset(cachep));
A
Alexander Potapenko 已提交
2415 2416 2417
			kasan_poison_object_data(
				cachep, objp + obj_offset(cachep));
		}
L
Linus Torvalds 已提交
2418 2419 2420

		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
J
Joe Perches 已提交
2421
				slab_error(cachep, "constructor overwrote the end of an object");
L
Linus Torvalds 已提交
2422
			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
J
Joe Perches 已提交
2423
				slab_error(cachep, "constructor overwrote the start of an object");
L
Linus Torvalds 已提交
2424
		}
2425 2426 2427 2428 2429
		/* need to poison the objs? */
		if (cachep->flags & SLAB_POISON) {
			poison_obj(cachep, objp, POISON_FREE);
			slab_kernel_map(cachep, objp, 0, 0);
		}
2430
	}
L
Linus Torvalds 已提交
2431
#endif
2432 2433
}

T
Thomas Garnier 已提交
2434 2435 2436 2437 2438
#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Hold information during a freelist initialization */
union freelist_init_state {
	struct {
		unsigned int pos;
2439
		unsigned int *list;
T
Thomas Garnier 已提交
2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456
		unsigned int count;
	};
	struct rnd_state rnd_state;
};

/*
 * Initialize the state based on the randomization method available.
 * Return true if the pre-computed list is available, false otherwise.
 */
static bool freelist_state_initialize(union freelist_init_state *state,
				struct kmem_cache *cachep,
				unsigned int count)
{
	bool ret;
	unsigned int rand;

	/* Use best entropy available to define a random shift */
2457
	rand = get_random_int();
T
Thomas Garnier 已提交
2458 2459 2460 2461 2462 2463 2464 2465

	/* Use a random state if the pre-computed list is not available */
	if (!cachep->random_seq) {
		prandom_seed_state(&state->rnd_state, rand);
		ret = false;
	} else {
		state->list = cachep->random_seq;
		state->count = count;
2466
		state->pos = rand % count;
T
Thomas Garnier 已提交
2467 2468 2469 2470 2471 2472 2473 2474
		ret = true;
	}
	return ret;
}

/* Get the next entry on the list and randomize it using a random shift */
static freelist_idx_t next_random_slot(union freelist_init_state *state)
{
2475 2476 2477
	if (state->pos >= state->count)
		state->pos = 0;
	return state->list[state->pos++];
T
Thomas Garnier 已提交
2478 2479
}

2480 2481 2482 2483 2484 2485 2486
/* Swap two freelist entries */
static void swap_free_obj(struct page *page, unsigned int a, unsigned int b)
{
	swap(((freelist_idx_t *)page->freelist)[a],
		((freelist_idx_t *)page->freelist)[b]);
}

T
Thomas Garnier 已提交
2487 2488 2489 2490 2491 2492
/*
 * Shuffle the freelist initialization state based on pre-computed lists.
 * return true if the list was successfully shuffled, false otherwise.
 */
static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page)
{
2493
	unsigned int objfreelist = 0, i, rand, count = cachep->num;
T
Thomas Garnier 已提交
2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517
	union freelist_init_state state;
	bool precomputed;

	if (count < 2)
		return false;

	precomputed = freelist_state_initialize(&state, cachep, count);

	/* Take a random entry as the objfreelist */
	if (OBJFREELIST_SLAB(cachep)) {
		if (!precomputed)
			objfreelist = count - 1;
		else
			objfreelist = next_random_slot(&state);
		page->freelist = index_to_obj(cachep, page, objfreelist) +
						obj_offset(cachep);
		count--;
	}

	/*
	 * On early boot, generate the list dynamically.
	 * Later use a pre-computed list for speed.
	 */
	if (!precomputed) {
2518 2519 2520 2521 2522 2523 2524 2525 2526
		for (i = 0; i < count; i++)
			set_free_obj(page, i, i);

		/* Fisher-Yates shuffle */
		for (i = count - 1; i > 0; i--) {
			rand = prandom_u32_state(&state.rnd_state);
			rand %= (i + 1);
			swap_free_obj(page, i, rand);
		}
T
Thomas Garnier 已提交
2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544
	} else {
		for (i = 0; i < count; i++)
			set_free_obj(page, i, next_random_slot(&state));
	}

	if (OBJFREELIST_SLAB(cachep))
		set_free_obj(page, cachep->num - 1, objfreelist);

	return true;
}
#else
static inline bool shuffle_freelist(struct kmem_cache *cachep,
				struct page *page)
{
	return false;
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

2545 2546 2547 2548
static void cache_init_objs(struct kmem_cache *cachep,
			    struct page *page)
{
	int i;
A
Alexander Potapenko 已提交
2549
	void *objp;
T
Thomas Garnier 已提交
2550
	bool shuffled;
2551 2552 2553

	cache_init_objs_debug(cachep, page);

T
Thomas Garnier 已提交
2554 2555 2556 2557
	/* Try to randomize the freelist if enabled */
	shuffled = shuffle_freelist(cachep, page);

	if (!shuffled && OBJFREELIST_SLAB(cachep)) {
2558 2559 2560 2561
		page->freelist = index_to_obj(cachep, page, cachep->num - 1) +
						obj_offset(cachep);
	}

2562
	for (i = 0; i < cachep->num; i++) {
2563
		objp = index_to_obj(cachep, page, i);
2564
		objp = kasan_init_slab_obj(cachep, objp);
2565

2566
		/* constructor could break poison info */
A
Alexander Potapenko 已提交
2567 2568 2569 2570 2571
		if (DEBUG == 0 && cachep->ctor) {
			kasan_unpoison_object_data(cachep, objp);
			cachep->ctor(objp);
			kasan_poison_object_data(cachep, objp);
		}
2572

T
Thomas Garnier 已提交
2573 2574
		if (!shuffled)
			set_free_obj(page, i, i);
L
Linus Torvalds 已提交
2575 2576 2577
	}
}

2578
static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
2579
{
2580
	void *objp;
2581

2582
	objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
2583
	page->active++;
2584

2585 2586 2587 2588 2589
#if DEBUG
	if (cachep->flags & SLAB_STORE_USER)
		set_store_user_dirty(cachep);
#endif

2590 2591 2592
	return objp;
}

2593 2594
static void slab_put_obj(struct kmem_cache *cachep,
			struct page *page, void *objp)
2595
{
2596
	unsigned int objnr = obj_to_index(cachep, page, objp);
2597
#if DEBUG
J
Joonsoo Kim 已提交
2598
	unsigned int i;
2599 2600

	/* Verify double free bug */
2601
	for (i = page->active; i < cachep->num; i++) {
2602
		if (get_free_obj(page, i) == objnr) {
2603
			pr_err("slab: double free detected in cache '%s', objp %px\n",
J
Joe Perches 已提交
2604
			       cachep->name, objp);
2605 2606
			BUG();
		}
2607 2608
	}
#endif
2609
	page->active--;
2610 2611 2612
	if (!page->freelist)
		page->freelist = objp + obj_offset(cachep);

2613
	set_free_obj(page, page->active, objnr);
2614 2615
}

2616 2617 2618
/*
 * Map pages beginning at addr to the given cache and slab. This is required
 * for the slab allocator to be able to lookup the cache and slab of a
2619
 * virtual address for kfree, ksize, and slab debugging.
2620
 */
2621
static void slab_map_pages(struct kmem_cache *cache, struct page *page,
2622
			   void *freelist)
L
Linus Torvalds 已提交
2623
{
2624
	page->slab_cache = cache;
2625
	page->freelist = freelist;
L
Linus Torvalds 已提交
2626 2627 2628 2629 2630 2631
}

/*
 * Grow (by 1) the number of slabs within a cache.  This is called by
 * kmem_cache_alloc() when there are no active objs left in a cache.
 */
2632 2633
static struct page *cache_grow_begin(struct kmem_cache *cachep,
				gfp_t flags, int nodeid)
L
Linus Torvalds 已提交
2634
{
2635
	void *freelist;
P
Pekka Enberg 已提交
2636 2637
	size_t offset;
	gfp_t local_flags;
2638
	int page_node;
2639
	struct kmem_cache_node *n;
2640
	struct page *page;
L
Linus Torvalds 已提交
2641

A
Andrew Morton 已提交
2642 2643 2644
	/*
	 * Be lazy and only check for valid flags here,  keeping it out of the
	 * critical path in kmem_cache_alloc().
L
Linus Torvalds 已提交
2645
	 */
2646
	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
2647
		gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
2648 2649 2650 2651
		flags &= ~GFP_SLAB_BUG_MASK;
		pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
				invalid_mask, &invalid_mask, flags, &flags);
		dump_stack();
2652
	}
2653
	WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
C
Christoph Lameter 已提交
2654
	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
L
Linus Torvalds 已提交
2655 2656

	check_irq_off();
2657
	if (gfpflags_allow_blocking(local_flags))
L
Linus Torvalds 已提交
2658 2659
		local_irq_enable();

A
Andrew Morton 已提交
2660 2661 2662
	/*
	 * Get mem for the objs.  Attempt to allocate a physical page from
	 * 'nodeid'.
2663
	 */
2664
	page = kmem_getpages(cachep, local_flags, nodeid);
2665
	if (!page)
L
Linus Torvalds 已提交
2666 2667
		goto failed;

2668 2669
	page_node = page_to_nid(page);
	n = get_node(cachep, page_node);
2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681

	/* Get the colour for the slab, and calculate the next value. */
	n->colour_next++;
	if (n->colour_next >= cachep->colour)
		n->colour_next = 0;

	offset = n->colour_next;
	if (offset >= cachep->colour)
		offset = 0;

	offset *= cachep->colour_off;

L
Linus Torvalds 已提交
2682
	/* Get slab management. */
2683
	freelist = alloc_slabmgmt(cachep, page, offset,
2684
			local_flags & ~GFP_CONSTRAINT_MASK, page_node);
2685
	if (OFF_SLAB(cachep) && !freelist)
L
Linus Torvalds 已提交
2686 2687
		goto opps1;

2688
	slab_map_pages(cachep, page, freelist);
L
Linus Torvalds 已提交
2689

A
Alexander Potapenko 已提交
2690
	kasan_poison_slab(page);
2691
	cache_init_objs(cachep, page);
L
Linus Torvalds 已提交
2692

2693
	if (gfpflags_allow_blocking(local_flags))
L
Linus Torvalds 已提交
2694 2695
		local_irq_disable();

2696 2697
	return page;

A
Andrew Morton 已提交
2698
opps1:
2699
	kmem_freepages(cachep, page);
A
Andrew Morton 已提交
2700
failed:
2701
	if (gfpflags_allow_blocking(local_flags))
L
Linus Torvalds 已提交
2702
		local_irq_disable();
2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719
	return NULL;
}

static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
{
	struct kmem_cache_node *n;
	void *list = NULL;

	check_irq_off();

	if (!page)
		return;

	INIT_LIST_HEAD(&page->lru);
	n = get_node(cachep, page_to_nid(page));

	spin_lock(&n->list_lock);
2720
	n->total_slabs++;
2721
	if (!page->active) {
2722
		list_add_tail(&page->lru, &(n->slabs_free));
2723
		n->free_slabs++;
2724
	} else
2725
		fixup_slab_list(cachep, n, page, &list);
2726

2727 2728 2729 2730 2731
	STATS_INC_GROWN(cachep);
	n->free_objects += cachep->num - page->active;
	spin_unlock(&n->list_lock);

	fixup_objfreelist_debug(cachep, &list);
L
Linus Torvalds 已提交
2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743
}

#if DEBUG

/*
 * Perform extra freeing checks:
 * - detect bad pointers.
 * - POISON/RED_ZONE checking
 */
static void kfree_debugcheck(const void *objp)
{
	if (!virt_addr_valid(objp)) {
2744
		pr_err("kfree_debugcheck: out of range ptr %lxh\n",
P
Pekka Enberg 已提交
2745 2746
		       (unsigned long)objp);
		BUG();
L
Linus Torvalds 已提交
2747 2748 2749
	}
}

2750 2751
static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
{
2752
	unsigned long long redzone1, redzone2;
2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767

	redzone1 = *dbg_redzone1(cache, obj);
	redzone2 = *dbg_redzone2(cache, obj);

	/*
	 * Redzone is ok.
	 */
	if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
		return;

	if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
		slab_error(cache, "double free detected");
	else
		slab_error(cache, "memory outside object was overwritten");

2768
	pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
2769
	       obj, redzone1, redzone2);
2770 2771
}

2772
static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2773
				   unsigned long caller)
L
Linus Torvalds 已提交
2774 2775
{
	unsigned int objnr;
2776
	struct page *page;
L
Linus Torvalds 已提交
2777

2778 2779
	BUG_ON(virt_to_cache(objp) != cachep);

2780
	objp -= obj_offset(cachep);
L
Linus Torvalds 已提交
2781
	kfree_debugcheck(objp);
2782
	page = virt_to_head_page(objp);
L
Linus Torvalds 已提交
2783 2784

	if (cachep->flags & SLAB_RED_ZONE) {
2785
		verify_redzone_free(cachep, objp);
L
Linus Torvalds 已提交
2786 2787 2788
		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
	}
2789 2790
	if (cachep->flags & SLAB_STORE_USER) {
		set_store_user_dirty(cachep);
2791
		*dbg_userword(cachep, objp) = (void *)caller;
2792
	}
L
Linus Torvalds 已提交
2793

2794
	objnr = obj_to_index(cachep, page, objp);
L
Linus Torvalds 已提交
2795 2796

	BUG_ON(objnr >= cachep->num);
2797
	BUG_ON(objp != index_to_obj(cachep, page, objnr));
L
Linus Torvalds 已提交
2798 2799 2800

	if (cachep->flags & SLAB_POISON) {
		poison_obj(cachep, objp, POISON_FREE);
2801
		slab_kernel_map(cachep, objp, 0, caller);
L
Linus Torvalds 已提交
2802 2803 2804 2805 2806 2807 2808 2809 2810
	}
	return objp;
}

#else
#define kfree_debugcheck(x) do { } while(0)
#define cache_free_debugcheck(x,objp,z) (objp)
#endif

2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825
static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
						void **list)
{
#if DEBUG
	void *next = *list;
	void *objp;

	while (next) {
		objp = next - obj_offset(cachep);
		next = *(void **)next;
		poison_obj(cachep, objp, POISON_FREE);
	}
#endif
}

2826
static inline void fixup_slab_list(struct kmem_cache *cachep,
2827 2828
				struct kmem_cache_node *n, struct page *page,
				void **list)
2829 2830 2831
{
	/* move slabp to correct slabp list: */
	list_del(&page->lru);
2832
	if (page->active == cachep->num) {
2833
		list_add(&page->lru, &n->slabs_full);
2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846
		if (OBJFREELIST_SLAB(cachep)) {
#if DEBUG
			/* Poisoning will be done without holding the lock */
			if (cachep->flags & SLAB_POISON) {
				void **objp = page->freelist;

				*objp = *list;
				*list = objp;
			}
#endif
			page->freelist = NULL;
		}
	} else
2847 2848 2849
		list_add(&page->lru, &n->slabs_partial);
}

2850 2851
/* Try to find non-pfmemalloc slab if needed */
static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
2852
					struct page *page, bool pfmemalloc)
2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870
{
	if (!page)
		return NULL;

	if (pfmemalloc)
		return page;

	if (!PageSlabPfmemalloc(page))
		return page;

	/* No need to keep pfmemalloc slab if we have enough free objects */
	if (n->free_objects > n->free_limit) {
		ClearPageSlabPfmemalloc(page);
		return page;
	}

	/* Move pfmemalloc slab to the end of list to speed up next search */
	list_del(&page->lru);
2871
	if (!page->active) {
2872
		list_add_tail(&page->lru, &n->slabs_free);
2873
		n->free_slabs++;
2874
	} else
2875 2876 2877 2878 2879 2880 2881
		list_add_tail(&page->lru, &n->slabs_partial);

	list_for_each_entry(page, &n->slabs_partial, lru) {
		if (!PageSlabPfmemalloc(page))
			return page;
	}

2882
	n->free_touched = 1;
2883
	list_for_each_entry(page, &n->slabs_free, lru) {
2884
		if (!PageSlabPfmemalloc(page)) {
2885
			n->free_slabs--;
2886
			return page;
2887
		}
2888 2889 2890 2891 2892 2893
	}

	return NULL;
}

static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
2894 2895 2896
{
	struct page *page;

2897
	assert_spin_locked(&n->list_lock);
2898
	page = list_first_entry_or_null(&n->slabs_partial, struct page, lru);
2899 2900
	if (!page) {
		n->free_touched = 1;
2901 2902
		page = list_first_entry_or_null(&n->slabs_free, struct page,
						lru);
2903
		if (page)
2904
			n->free_slabs--;
2905 2906
	}

2907
	if (sk_memalloc_socks())
2908
		page = get_valid_first_slab(n, page, pfmemalloc);
2909

2910 2911 2912
	return page;
}

2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940
static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
				struct kmem_cache_node *n, gfp_t flags)
{
	struct page *page;
	void *obj;
	void *list = NULL;

	if (!gfp_pfmemalloc_allowed(flags))
		return NULL;

	spin_lock(&n->list_lock);
	page = get_first_slab(n, true);
	if (!page) {
		spin_unlock(&n->list_lock);
		return NULL;
	}

	obj = slab_get_obj(cachep, page);
	n->free_objects--;

	fixup_slab_list(cachep, n, page, &list);

	spin_unlock(&n->list_lock);
	fixup_objfreelist_debug(cachep, &list);

	return obj;
}

2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964
/*
 * Slab list should be fixed up by fixup_slab_list() for existing slab
 * or cache_grow_end() for new slab
 */
static __always_inline int alloc_block(struct kmem_cache *cachep,
		struct array_cache *ac, struct page *page, int batchcount)
{
	/*
	 * There must be at least one object available for
	 * allocation.
	 */
	BUG_ON(page->active >= cachep->num);

	while (page->active < cachep->num && batchcount--) {
		STATS_INC_ALLOCED(cachep);
		STATS_INC_ACTIVE(cachep);
		STATS_SET_HIGH(cachep);

		ac->entry[ac->avail++] = slab_get_obj(cachep, page);
	}

	return batchcount;
}
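
/*
 * cache_alloc_refill() is the allocation slow path: with the per-cpu
 * array_cache empty, it refills it from the node's shared array or from
 * partial/free slabs, growing the cache with fresh pages if necessary.
 */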

2965
static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
L
Linus Torvalds 已提交
2966 2967
{
	int batchcount;
2968
	struct kmem_cache_node *n;
2969
	struct array_cache *ac, *shared;
P
Pekka Enberg 已提交
2970
	int node;
2971
	void *list = NULL;
2972
	struct page *page;
P
Pekka Enberg 已提交
2973

L
Linus Torvalds 已提交
2974
	check_irq_off();
2975
	node = numa_mem_id();
2976

2977
	ac = cpu_cache_get(cachep);
L
Linus Torvalds 已提交
2978 2979
	batchcount = ac->batchcount;
	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
A
Andrew Morton 已提交
2980 2981 2982 2983
		/*
		 * If there was little recent activity on this cache, then
		 * perform only a partial refill.  Otherwise we could generate
		 * refill bouncing.
L
Linus Torvalds 已提交
2984 2985 2986
		 */
		batchcount = BATCHREFILL_LIMIT;
	}
2987
	n = get_node(cachep, node);
2988

2989
	BUG_ON(ac->avail > 0 || !n);
2990 2991 2992 2993
	shared = READ_ONCE(n->shared);
	if (!n->free_objects && (!shared || !shared->avail))
		goto direct_grow;

2994
	spin_lock(&n->list_lock);
2995
	shared = READ_ONCE(n->shared);
L
Linus Torvalds 已提交
2996

2997
	/* See if we can refill from the shared array */
2998 2999
	if (shared && transfer_objects(ac, shared, batchcount)) {
		shared->touched = 1;
3000
		goto alloc_done;
3001
	}
3002

L
Linus Torvalds 已提交
3003 3004
	while (batchcount > 0) {
		/* Get the slab from which the allocation will come. */
		page = get_first_slab(n, false);
3006 3007
		if (!page)
			goto must_grow;
L
Linus Torvalds 已提交
3008 3009

		check_spinlock_acquired(cachep);
3010

3011
		batchcount = alloc_block(cachep, ac, page, batchcount);
3012
		fixup_slab_list(cachep, n, page, &list);
L
Linus Torvalds 已提交
3013 3014
	}

A
Andrew Morton 已提交
3015
must_grow:
3016
	n->free_objects -= ac->avail;
A
Andrew Morton 已提交
3017
alloc_done:
3018
	spin_unlock(&n->list_lock);
3019
	fixup_objfreelist_debug(cachep, &list);
L
Linus Torvalds 已提交
3020

3021
direct_grow:
L
Linus Torvalds 已提交
3022
	if (unlikely(!ac->avail)) {
3023 3024 3025 3026 3027 3028 3029 3030
		/* Check if we can use obj in pfmemalloc slab */
		if (sk_memalloc_socks()) {
			void *obj = cache_alloc_pfmemalloc(cachep, n, flags);

			if (obj)
				return obj;
		}

3031
		page = cache_grow_begin(cachep, gfp_exact_node(flags), node);
3032

3033 3034 3035 3036
		/*
		 * cache_grow_begin() can reenable interrupts,
		 * then ac could change.
		 */
3037
		ac = cpu_cache_get(cachep);
3038 3039 3040
		if (!ac->avail && page)
			alloc_block(cachep, ac, page, batchcount);
		cache_grow_end(cachep, page);
3041

3042
		if (!ac->avail)
L
Linus Torvalds 已提交
3043 3044 3045
			return NULL;
	}
	ac->touched = 1;
3046

3047
	return ac->entry[--ac->avail];
L
Linus Torvalds 已提交
3048 3049
}

A
Andrew Morton 已提交
3050 3051
static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
						gfp_t flags)
L
Linus Torvalds 已提交
3052
{
3053
	might_sleep_if(gfpflags_allow_blocking(flags));
L
Linus Torvalds 已提交
3054 3055 3056
}

#if DEBUG
A
Andrew Morton 已提交
3057
static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
3058
				gfp_t flags, void *objp, unsigned long caller)
L
Linus Torvalds 已提交
3059
{
3060
	WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
P
Pekka Enberg 已提交
3061
	if (!objp)
L
Linus Torvalds 已提交
3062
		return objp;
P
Pekka Enberg 已提交
3063
	if (cachep->flags & SLAB_POISON) {
L
Linus Torvalds 已提交
3064
		check_poison_obj(cachep, objp);
3065
		slab_kernel_map(cachep, objp, 1, 0);
L
Linus Torvalds 已提交
3066 3067 3068
		poison_obj(cachep, objp, POISON_INUSE);
	}
	if (cachep->flags & SLAB_STORE_USER)
3069
		*dbg_userword(cachep, objp) = (void *)caller;
L
Linus Torvalds 已提交
3070 3071

	if (cachep->flags & SLAB_RED_ZONE) {
A
Andrew Morton 已提交
3072 3073
		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
J
Joe Perches 已提交
3074
			slab_error(cachep, "double free, or memory outside object was overwritten");
3075
			pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
3076 3077
			       objp, *dbg_redzone1(cachep, objp),
			       *dbg_redzone2(cachep, objp));
L
Linus Torvalds 已提交
3078 3079 3080 3081
		}
		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
	}
3082

3083
	objp += obj_offset(cachep);
3084
	if (cachep->ctor && cachep->flags & SLAB_POISON)
3085
		cachep->ctor(objp);
T
Tetsuo Handa 已提交
3086 3087
	if (ARCH_SLAB_MINALIGN &&
	    ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
3088
		pr_err("0x%px: not aligned to ARCH_SLAB_MINALIGN=%d\n",
H
Hugh Dickins 已提交
3089
		       objp, (int)ARCH_SLAB_MINALIGN);
3090
	}
L
Linus Torvalds 已提交
3091 3092 3093 3094 3095 3096
	return objp;
}
#else
#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
#endif

3097
static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
L
Linus Torvalds 已提交
3098
{
P
Pekka Enberg 已提交
3099
	void *objp;
L
Linus Torvalds 已提交
3100 3101
	struct array_cache *ac;

3102
	check_irq_off();
3103

3104
	ac = cpu_cache_get(cachep);
L
Linus Torvalds 已提交
3105 3106
	if (likely(ac->avail)) {
		ac->touched = 1;
3107
		objp = ac->entry[--ac->avail];
3108

3109 3110
		STATS_INC_ALLOCHIT(cachep);
		goto out;
L
Linus Torvalds 已提交
3111
	}
3112 3113

	STATS_INC_ALLOCMISS(cachep);
3114
	objp = cache_alloc_refill(cachep, flags);
3115 3116 3117 3118 3119 3120 3121
	/*
	 * the 'ac' may be updated by cache_alloc_refill(),
	 * and kmemleak_erase() requires its correct value.
	 */
	ac = cpu_cache_get(cachep);

out:
3122 3123 3124 3125 3126
	/*
	 * To avoid a false negative, if an object that is in one of the
	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
	 * treat the array pointers as a reference to the object.
	 */
3127 3128
	if (objp)
		kmemleak_erase(&ac->entry[ac->avail]);
3129 3130 3131
	return objp;
}

3132
#ifdef CONFIG_NUMA
3133
/*
3134
 * Try allocating on another node if PFA_SPREAD_SLAB or a mempolicy is set.
3135 3136 3137 3138 3139 3140 3141 3142
 *
 * If we are in_interrupt, then process context, including cpusets and
 * mempolicy, may not apply and should not be used for allocation policy.
 */
static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	int nid_alloc, nid_here;

3143
	if (in_interrupt() || (flags & __GFP_THISNODE))
3144
		return NULL;
3145
	nid_alloc = nid_here = numa_mem_id();
3146
	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3147
		nid_alloc = cpuset_slab_spread_node();
3148
	else if (current->mempolicy)
3149
		nid_alloc = mempolicy_slab_node();
3150
	if (nid_alloc != nid_here)
3151
		return ____cache_alloc_node(cachep, flags, nid_alloc);
3152 3153 3154
	return NULL;
}

3155 3156
/*
 * Fallback function if there was no memory available and no objects on a
3157
 * certain node and fall back is permitted. First we scan all the
3158
 * available node for available objects. If that fails then we
3159 3160 3161
 * perform an allocation without specifying a node. This allows the page
 * allocator to do its reclaim / fallback magic. We then insert the
 * slab into the proper nodelist and then allocate from it.
3162
 */
3163
static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3164
{
3165
	struct zonelist *zonelist;
3166
	struct zoneref *z;
3167 3168
	struct zone *zone;
	enum zone_type high_zoneidx = gfp_zone(flags);
3169
	void *obj = NULL;
3170
	struct page *page;
3171
	int nid;
3172
	unsigned int cpuset_mems_cookie;
3173 3174 3175 3176

	if (flags & __GFP_THISNODE)
		return NULL;

3177
retry_cpuset:
3178
	cpuset_mems_cookie = read_mems_allowed_begin();
3179
	zonelist = node_zonelist(mempolicy_slab_node(), flags);
3180

3181 3182 3183 3184 3185
retry:
	/*
	 * Look through allowed nodes for objects available
	 * from existing per node queues.
	 */
3186 3187
	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
		nid = zone_to_nid(zone);
3188

3189
		if (cpuset_zone_allowed(zone, flags) &&
3190 3191
			get_node(cache, nid) &&
			get_node(cache, nid)->free_objects) {
3192
				obj = ____cache_alloc_node(cache,
D
David Rientjes 已提交
3193
					gfp_exact_node(flags), nid);
3194 3195 3196
				if (obj)
					break;
		}
3197 3198
	}

3199
	if (!obj) {
3200 3201 3202 3203 3204 3205
		/*
		 * This allocation will be performed within the constraints
		 * of the current cpuset / memory policy requirements.
		 * We may trigger various forms of reclaim on the allowed
		 * set and go into memory reserves if necessary.
		 */
3206 3207 3208 3209
		page = cache_grow_begin(cache, flags, numa_mem_id());
		cache_grow_end(cache, page);
		if (page) {
			nid = page_to_nid(page);
3210 3211
			obj = ____cache_alloc_node(cache,
				gfp_exact_node(flags), nid);
3212

3213
			/*
3214 3215
			 * Another processor may allocate the objects in
			 * the slab since we are not holding any locks.
3216
			 */
3217 3218
			if (!obj)
				goto retry;
3219
		}
3220
	}
3221

3222
	if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
3223
		goto retry_cpuset;
3224 3225 3226
	return obj;
}

3227 3228
/*
 * An interface to enable slab creation on a specific node.
 */
3230
static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
A
Andrew Morton 已提交
3231
				int nodeid)
3232
{
3233
	struct page *page;
3234
	struct kmem_cache_node *n;
3235
	void *obj = NULL;
3236
	void *list = NULL;
P
Pekka Enberg 已提交
3237

3238
	VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
3239
	n = get_node(cachep, nodeid);
3240
	BUG_ON(!n);
P
Pekka Enberg 已提交
3241

3242
	check_irq_off();
3243
	spin_lock(&n->list_lock);
3244
	page = get_first_slab(n, false);
3245 3246
	if (!page)
		goto must_grow;
P
Pekka Enberg 已提交
3247 3248 3249 3250 3251 3252 3253

	check_spinlock_acquired_node(cachep, nodeid);

	STATS_INC_NODEALLOCS(cachep);
	STATS_INC_ACTIVE(cachep);
	STATS_SET_HIGH(cachep);

3254
	BUG_ON(page->active == cachep->num);
P
Pekka Enberg 已提交
3255

3256
	obj = slab_get_obj(cachep, page);
3257
	n->free_objects--;
P
Pekka Enberg 已提交
3258

3259
	fixup_slab_list(cachep, n, page, &list);
3260

3261
	spin_unlock(&n->list_lock);
3262
	fixup_objfreelist_debug(cachep, &list);
3263
	return obj;
3264

A
Andrew Morton 已提交
3265
must_grow:
3266
	spin_unlock(&n->list_lock);
3267
	page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
3268 3269 3270 3271
	if (page) {
		/* This slab isn't counted yet so don't update free_objects */
		obj = slab_get_obj(cachep, page);
	}
3272
	cache_grow_end(cachep, page);
L
Linus Torvalds 已提交
3273

3274
	return obj ? obj : fallback_alloc(cachep, flags);
3275
}
3276 3277

static __always_inline void *
3278
slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3279
		   unsigned long caller)
3280 3281 3282
{
	unsigned long save_flags;
	void *ptr;
3283
	int slab_node = numa_mem_id();
3284

3285
	flags &= gfp_allowed_mask;
3286 3287
	cachep = slab_pre_alloc_hook(cachep, flags);
	if (unlikely(!cachep))
3288 3289
		return NULL;

3290 3291 3292
	cache_alloc_debugcheck_before(cachep, flags);
	local_irq_save(save_flags);

A
Andrew Morton 已提交
3293
	if (nodeid == NUMA_NO_NODE)
3294
		nodeid = slab_node;
3295

3296
	if (unlikely(!get_node(cachep, nodeid))) {
3297 3298 3299 3300 3301
		/* Node not bootstrapped yet */
		ptr = fallback_alloc(cachep, flags);
		goto out;
	}

3302
	if (nodeid == slab_node) {
3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318
		/*
		 * Use the locally cached objects if possible.
		 * However ____cache_alloc does not allow fallback
		 * to other nodes. It may fail while we still have
		 * objects on other nodes available.
		 */
		ptr = ____cache_alloc(cachep, flags);
		if (ptr)
			goto out;
	}
	/* ___cache_alloc_node can fall back to other nodes */
	ptr = ____cache_alloc_node(cachep, flags, nodeid);
  out:
	local_irq_restore(save_flags);
	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);

3319 3320
	if (unlikely(flags & __GFP_ZERO) && ptr)
		memset(ptr, 0, cachep->object_size);
3321

3322
	slab_post_alloc_hook(cachep, flags, 1, &ptr);
3323 3324 3325 3326 3327 3328 3329 3330
	return ptr;
}

static __always_inline void *
__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
{
	void *objp;

3331
	if (current->mempolicy || cpuset_do_slab_mem_spread()) {
3332 3333 3334 3335 3336 3337 3338 3339 3340 3341
		objp = alternate_node_alloc(cache, flags);
		if (objp)
			goto out;
	}
	objp = ____cache_alloc(cache, flags);

	/*
	 * We may just have run out of memory on the local node.
	 * ____cache_alloc_node() knows how to locate memory on other nodes
	 */
3342 3343
	if (!objp)
		objp = ____cache_alloc_node(cache, flags, numa_mem_id());
3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358

  out:
	return objp;
}
#else

static __always_inline void *
__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	return ____cache_alloc(cachep, flags);
}

#endif /* CONFIG_NUMA */

static __always_inline void *
3359
slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
3360 3361 3362 3363
{
	unsigned long save_flags;
	void *objp;

3364
	flags &= gfp_allowed_mask;
3365 3366
	cachep = slab_pre_alloc_hook(cachep, flags);
	if (unlikely(!cachep))
3367 3368
		return NULL;

3369 3370 3371 3372 3373 3374 3375
	cache_alloc_debugcheck_before(cachep, flags);
	local_irq_save(save_flags);
	objp = __do_cache_alloc(cachep, flags);
	local_irq_restore(save_flags);
	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
	prefetchw(objp);

3376 3377
	if (unlikely(flags & __GFP_ZERO) && objp)
		memset(objp, 0, cachep->object_size);
3378

3379
	slab_post_alloc_hook(cachep, flags, 1, &objp);
3380 3381
	return objp;
}
3382 3383

/*
3384
 * Caller must hold the correct kmem_cache_node's list_lock.
 * @list: list of detached free slabs; the caller should free them.
 */
3387 3388
static void free_block(struct kmem_cache *cachep, void **objpp,
			int nr_objects, int node, struct list_head *list)
L
Linus Torvalds 已提交
3389 3390
{
	int i;
3391
	struct kmem_cache_node *n = get_node(cachep, node);
3392 3393 3394
	struct page *page;

	n->free_objects += nr_objects;
L
Linus Torvalds 已提交
3395 3396

	for (i = 0; i < nr_objects; i++) {
3397
		void *objp;
3398
		struct page *page;
L
Linus Torvalds 已提交
3399

3400 3401
		objp = objpp[i];

3402 3403
		page = virt_to_head_page(objp);
		list_del(&page->lru);
3404
		check_spinlock_acquired_node(cachep, node);
3405
		slab_put_obj(cachep, page, objp);
L
Linus Torvalds 已提交
3406 3407 3408
		STATS_DEC_ACTIVE(cachep);

		/* fixup slab chains */
3409
		if (page->active == 0) {
3410
			list_add(&page->lru, &n->slabs_free);
3411 3412
			n->free_slabs++;
		} else {
L
Linus Torvalds 已提交
3413 3414 3415 3416
			/* Unconditionally move a slab to the end of the
			 * partial list on free - maximum time for the
			 * other objects to be freed, too.
			 */
3417
			list_add_tail(&page->lru, &n->slabs_partial);
L
Linus Torvalds 已提交
3418 3419
		}
	}
3420 3421 3422 3423 3424

	while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) {
		n->free_objects -= cachep->num;

		page = list_last_entry(&n->slabs_free, struct page, lru);
3425
		list_move(&page->lru, list);
3426
		n->free_slabs--;
3427
		n->total_slabs--;
3428
	}
L
Linus Torvalds 已提交
3429 3430
}
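
/*
 * cache_flusharray() pushes ac->batchcount objects out of the per-cpu
 * array_cache: into the node's shared array while it has room, otherwise
 * back onto the slab lists via free_block().
 */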

3431
static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
L
Linus Torvalds 已提交
3432 3433
{
	int batchcount;
3434
	struct kmem_cache_node *n;
3435
	int node = numa_mem_id();
3436
	LIST_HEAD(list);
L
Linus Torvalds 已提交
3437 3438

	batchcount = ac->batchcount;
3439

L
Linus Torvalds 已提交
3440
	check_irq_off();
3441
	n = get_node(cachep, node);
3442 3443 3444
	spin_lock(&n->list_lock);
	if (n->shared) {
		struct array_cache *shared_array = n->shared;
P
Pekka Enberg 已提交
3445
		int max = shared_array->limit - shared_array->avail;
L
Linus Torvalds 已提交
3446 3447 3448
		if (max) {
			if (batchcount > max)
				batchcount = max;
3449
			memcpy(&(shared_array->entry[shared_array->avail]),
P
Pekka Enberg 已提交
3450
			       ac->entry, sizeof(void *) * batchcount);
L
Linus Torvalds 已提交
3451 3452 3453 3454 3455
			shared_array->avail += batchcount;
			goto free_done;
		}
	}

3456
	free_block(cachep, ac->entry, batchcount, node, &list);
A
Andrew Morton 已提交
3457
free_done:
L
Linus Torvalds 已提交
3458 3459 3460
#if STATS
	{
		int i = 0;
3461
		struct page *page;
L
Linus Torvalds 已提交
3462

3463
		list_for_each_entry(page, &n->slabs_free, lru) {
3464
			BUG_ON(page->active);
L
Linus Torvalds 已提交
3465 3466 3467 3468 3469 3470

			i++;
		}
		STATS_SET_FREEABLE(cachep, i);
	}
#endif
3471
	spin_unlock(&n->list_lock);
3472
	slabs_destroy(cachep, &list);
L
Linus Torvalds 已提交
3473
	ac->avail -= batchcount;
A
Andrew Morton 已提交
3474
	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
L
Linus Torvalds 已提交
3475 3476 3477
}

/*
A
Andrew Morton 已提交
3478 3479
 * Release an obj back to its cache. If the obj has a constructed state, it must
 * be in this state _before_ it is released.  Called with interrupts disabled.
L
Linus Torvalds 已提交
3480
 */
3481 3482
static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
					 unsigned long caller)
L
Linus Torvalds 已提交
3483
{
3484
	/* Put the object into the quarantine, don't touch it for now. */
3485
	if (kasan_slab_free(cachep, objp, _RET_IP_))
3486 3487 3488 3489
		return;

	___cache_free(cachep, objp, caller);
}

void ___cache_free(struct kmem_cache *cachep, void *objp,
		unsigned long caller)
{
	struct array_cache *ac = cpu_cache_get(cachep);

	check_irq_off();
	kmemleak_free_recursive(objp, cachep->flags);
	objp = cache_free_debugcheck(cachep, objp, caller);

	/*
	 * Skip calling cache_free_alien() when the platform is not numa.
	 * This will avoid cache misses that happen while accessing slabp (which
	 * is per page memory reference) to get nodeid. Instead use a global
	 * variable to skip the call, which is most likely to be present in
	 * the cache.
	 */
	if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
		return;

	if (ac->avail < ac->limit) {
		STATS_INC_FREEHIT(cachep);
	} else {
		STATS_INC_FREEMISS(cachep);
		cache_flusharray(cachep, ac);
	}

	if (sk_memalloc_socks()) {
		struct page *page = virt_to_head_page(objp);

		if (unlikely(PageSlabPfmemalloc(page))) {
			cache_free_pfmemalloc(cachep, page, objp);
			return;
		}
	}

	ac->entry[ac->avail++] = objp;
}

/**
 * kmem_cache_alloc - Allocate an object
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 *
 * Allocate an object from this cache.  The flags are only relevant
 * if the cache has no available objects.
 */
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	void *ret = slab_alloc(cachep, flags, _RET_IP_);

	ret = kasan_slab_alloc(cachep, ret, flags);
	trace_kmem_cache_alloc(_RET_IP_, ret,
			       cachep->object_size, cachep->size, flags);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc);
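
/*
 * Example usage (illustrative sketch, not part of slab.c): a caller creates
 * a cache once and then allocates and frees objects from it. The cache name
 * and struct below ("foo_cache", struct foo) are made up for the sketch.
 *
 *	struct foo { int a; };
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	if (foo_cache) {
 *		struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *
 *		if (f)
 *			kmem_cache_free(foo_cache, f);
 *		kmem_cache_destroy(foo_cache);
 *	}
 */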

static __always_inline void
cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags,
				  size_t size, void **p, unsigned long caller)
{
	size_t i;

	for (i = 0; i < size; i++)
		p[i] = cache_alloc_debugcheck_after(s, flags, p[i], caller);
}

int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
			  void **p)
{
	size_t i;

	s = slab_pre_alloc_hook(s, flags);
	if (!s)
		return 0;

	cache_alloc_debugcheck_before(s, flags);

	local_irq_disable();
	for (i = 0; i < size; i++) {
		void *objp = __do_cache_alloc(s, flags);

		if (unlikely(!objp))
			goto error;
		p[i] = objp;
	}
	local_irq_enable();

	cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_);

	/* Clear memory outside IRQ disabled section */
	if (unlikely(flags & __GFP_ZERO))
		for (i = 0; i < size; i++)
			memset(p[i], 0, s->object_size);

	slab_post_alloc_hook(s, flags, size, p);
	/* FIXME: Trace call missing. Christoph would like a bulk variant */
	return size;
error:
	local_irq_enable();
	cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
	slab_post_alloc_hook(s, flags, i, p);
	__kmem_cache_free_bulk(s, i, p);
	return 0;
}
EXPORT_SYMBOL(kmem_cache_alloc_bulk);
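
/*
 * Example usage (illustrative sketch, not part of slab.c): bulk allocation
 * fills a caller-supplied array and returns the number of objects obtained
 * (0 on failure); the objects can later be released together with
 * kmem_cache_free_bulk(). "foo_cache" is assumed to exist for the sketch.
 *
 *	void *objs[16];
 *	int got = kmem_cache_alloc_bulk(foo_cache, GFP_KERNEL,
 *					ARRAY_SIZE(objs), objs);
 *
 *	if (got)
 *		kmem_cache_free_bulk(foo_cache, got, objs);
 */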

#ifdef CONFIG_TRACING
void *
kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
	void *ret;

	ret = slab_alloc(cachep, flags, _RET_IP_);

	ret = kasan_kmalloc(cachep, ret, size, flags);
	trace_kmalloc(_RET_IP_, ret,
		      size, cachep->size, flags);
	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_trace);
#endif

#ifdef CONFIG_NUMA
/**
 * kmem_cache_alloc_node - Allocate an object on the specified node
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 * @nodeid: node number of the target node.
 *
 * Identical to kmem_cache_alloc but it will allocate memory on the given
 * node, which can improve the performance for cpu bound structures.
 *
 * Fallback to other node is possible if __GFP_THISNODE is not set.
 */
void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);

	ret = kasan_slab_alloc(cachep, ret, flags);
	trace_kmem_cache_alloc_node(_RET_IP_, ret,
				    cachep->object_size, cachep->size,
				    flags, nodeid);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
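
/*
 * Example usage (illustrative sketch, not part of slab.c): allocate an
 * object on the local NUMA node; without __GFP_THISNODE the allocation may
 * fall back to other nodes. "foo_cache" is assumed to exist for the sketch.
 *
 *	struct foo *f = kmem_cache_alloc_node(foo_cache, GFP_KERNEL,
 *					      numa_node_id());
 *
 *	if (f)
 *		kmem_cache_free(foo_cache, f);
 */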

#ifdef CONFIG_TRACING
void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
				  gfp_t flags,
				  int nodeid,
				  size_t size)
{
	void *ret;

	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);

	ret = kasan_kmalloc(cachep, ret, size, flags);
	trace_kmalloc_node(_RET_IP_, ret,
			   size, cachep->size,
			   flags, nodeid);
	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
#endif

static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
{
	struct kmem_cache *cachep;
	void *ret;

	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
		return NULL;
	cachep = kmalloc_slab(size, flags);
	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
		return cachep;
	ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
	ret = kasan_kmalloc(cachep, ret, size, flags);

	return ret;
}

void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __do_kmalloc_node(size, flags, node, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc_node);

void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
		int node, unsigned long caller)
{
	return __do_kmalloc_node(size, flags, node, caller);
}
EXPORT_SYMBOL(__kmalloc_node_track_caller);
#endif /* CONFIG_NUMA */

/**
 * __do_kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @caller: function caller for debug tracking of the caller
 */
static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
					  unsigned long caller)
{
	struct kmem_cache *cachep;
	void *ret;

	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
		return NULL;
	cachep = kmalloc_slab(size, flags);
	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
		return cachep;
	ret = slab_alloc(cachep, flags, caller);

	ret = kasan_kmalloc(cachep, ret, size, flags);
	trace_kmalloc(caller, ret,
		      size, cachep->size, flags);

	return ret;
}

void *__kmalloc(size_t size, gfp_t flags)
{
	return __do_kmalloc(size, flags, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc);

void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
{
	return __do_kmalloc(size, flags, caller);
}
EXPORT_SYMBOL(__kmalloc_track_caller);

/**
 * kmem_cache_free - Deallocate an object
 * @cachep: The cache the allocation was from.
 * @objp: The previously allocated object.
 *
 * Free an object which was previously allocated from this
 * cache.
 */
void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	unsigned long flags;
	cachep = cache_from_obj(cachep, objp);
	if (!cachep)
		return;

	local_irq_save(flags);
	debug_check_no_locks_freed(objp, cachep->object_size);
	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
		debug_check_no_obj_freed(objp, cachep->object_size);
	__cache_free(cachep, objp, _RET_IP_);
	local_irq_restore(flags);

	trace_kmem_cache_free(_RET_IP_, objp);
}
EXPORT_SYMBOL(kmem_cache_free);

void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
{
	struct kmem_cache *s;
	size_t i;

	local_irq_disable();
	for (i = 0; i < size; i++) {
		void *objp = p[i];

		if (!orig_s) /* called via kfree_bulk */
			s = virt_to_cache(objp);
		else
			s = cache_from_obj(orig_s, objp);

		debug_check_no_locks_freed(objp, s->object_size);
		if (!(s->flags & SLAB_DEBUG_OBJECTS))
			debug_check_no_obj_freed(objp, s->object_size);

		__cache_free(s, objp, _RET_IP_);
	}
	local_irq_enable();

	/* FIXME: add tracing */
}
EXPORT_SYMBOL(kmem_cache_free_bulk);
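
/*
 * Example usage (illustrative sketch, not part of slab.c): kfree_bulk() is
 * the kmalloc-based counterpart; it calls kmem_cache_free_bulk() with a NULL
 * cache, which is why the loop above looks up the cache per object.
 *
 *	void *bufs[8];
 *	size_t i, got = 0;
 *
 *	for (i = 0; i < ARRAY_SIZE(bufs); i++) {
 *		bufs[got] = kmalloc(32, GFP_KERNEL);
 *		if (bufs[got])
 *			got++;
 *	}
 *	if (got)
 *		kfree_bulk(got, bufs);
 */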

/**
 * kfree - free previously allocated memory
 * @objp: pointer returned by kmalloc.
 *
 * If @objp is NULL, no operation is performed.
 *
 * Don't free memory not originally allocated by kmalloc()
 * or you will run into trouble.
 */
void kfree(const void *objp)
{
	struct kmem_cache *c;
	unsigned long flags;

	trace_kfree(_RET_IP_, objp);

	if (unlikely(ZERO_OR_NULL_PTR(objp)))
		return;
	local_irq_save(flags);
	kfree_debugcheck(objp);
	c = virt_to_cache(objp);
	debug_check_no_locks_freed(objp, c->object_size);

	debug_check_no_obj_freed(objp, c->object_size);
	__cache_free(c, (void *)objp, _RET_IP_);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(kfree);
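
/*
 * Example usage (illustrative sketch, not part of slab.c): the common
 * kmalloc()/kfree() pair; small allocations are served from the kmalloc
 * caches via __do_kmalloc() above, and kfree(NULL) is a no-op.
 *
 *	char *buf = kmalloc(64, GFP_KERNEL);
 *
 *	if (buf) {
 *		memset(buf, 0, 64);
 *		kfree(buf);
 *	}
 */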

/*
 * This initializes kmem_cache_node or resizes various caches for all nodes.
 */
static int setup_kmem_cache_nodes(struct kmem_cache *cachep, gfp_t gfp)
{
	int ret;
	int node;
	struct kmem_cache_node *n;

	for_each_online_node(node) {
		ret = setup_kmem_cache_node(cachep, node, gfp, true);
		if (ret)
			goto fail;

	}

	return 0;

fail:
	if (!cachep->list.next) {
		/* Cache is not active yet. Roll back what we did */
		node--;
		while (node >= 0) {
			n = get_node(cachep, node);
			if (n) {
				kfree(n->shared);
				free_alien_cache(n->alien);
				kfree(n);
				cachep->node[node] = NULL;
			}
			node--;
		}
	}
	return -ENOMEM;
}

/* Always called with the slab_mutex held */
static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
				int batchcount, int shared, gfp_t gfp)
{
	struct array_cache __percpu *cpu_cache, *prev;
	int cpu;

	cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount);
	if (!cpu_cache)
		return -ENOMEM;

	prev = cachep->cpu_cache;
	cachep->cpu_cache = cpu_cache;
	/*
	 * Without a previous cpu_cache there's no need to synchronize remote
	 * cpus, so skip the IPIs.
	 */
	if (prev)
		kick_all_cpus_sync();

	check_irq_on();
	cachep->batchcount = batchcount;
	cachep->limit = limit;
	cachep->shared = shared;

	if (!prev)
		goto setup_node;

	for_each_online_cpu(cpu) {
		LIST_HEAD(list);
		int node;
		struct kmem_cache_node *n;
		struct array_cache *ac = per_cpu_ptr(prev, cpu);

		node = cpu_to_mem(cpu);
		n = get_node(cachep, node);
		spin_lock_irq(&n->list_lock);
		free_block(cachep, ac->entry, ac->avail, node, &list);
		spin_unlock_irq(&n->list_lock);
		slabs_destroy(cachep, &list);
	}
	free_percpu(prev);

setup_node:
	return setup_kmem_cache_nodes(cachep, gfp);
}

static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
				int batchcount, int shared, gfp_t gfp)
{
	int ret;
	struct kmem_cache *c;

	ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);

	if (slab_state < FULL)
		return ret;

	if ((ret < 0) || !is_root_cache(cachep))
		return ret;

	lockdep_assert_held(&slab_mutex);
	for_each_memcg_cache(c, cachep) {
		/* return value determined by the root cache only */
		__do_tune_cpucache(c, limit, batchcount, shared, gfp);
	}

	return ret;
}

/* Called with slab_mutex held always */
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
{
	int err;
	int limit = 0;
	int shared = 0;
	int batchcount = 0;

	err = cache_random_seq_create(cachep, cachep->num, gfp);
	if (err)
		goto end;

	if (!is_root_cache(cachep)) {
		struct kmem_cache *root = memcg_root_cache(cachep);
		limit = root->limit;
		shared = root->shared;
		batchcount = root->batchcount;
	}

	if (limit && shared && batchcount)
		goto skip_setup;
	/*
	 * The head array serves three purposes:
	 * - create a LIFO ordering, i.e. return objects that are cache-warm
	 * - reduce the number of spinlock operations.
	 * - reduce the number of linked list operations on the slab and
	 *   bufctl chains: array operations are cheaper.
	 * The numbers are guessed, we should auto-tune as described by
	 * Bonwick.
	 */
	if (cachep->size > 131072)
		limit = 1;
	else if (cachep->size > PAGE_SIZE)
		limit = 8;
	else if (cachep->size > 1024)
		limit = 24;
	else if (cachep->size > 256)
		limit = 54;
	else
		limit = 120;

	/*
	 * CPU bound tasks (e.g. network routing) can exhibit cpu bound
	 * allocation behaviour: Most allocs on one cpu, most free operations
	 * on another cpu. For these cases, an efficient object passing between
	 * cpus is necessary. This is provided by a shared array. The array
	 * replaces Bonwick's magazine layer.
	 * On uniprocessor, it's functionally equivalent (but less efficient)
	 * to a larger limit. Thus disabled by default.
	 */
	shared = 0;
	if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
		shared = 8;

#if DEBUG
	/*
	 * With debugging enabled, a large batchcount leads to excessively long
	 * periods with local interrupts disabled. Limit the batchcount.
	 */
	if (limit > 32)
		limit = 32;
#endif
	batchcount = (limit + 1) / 2;
skip_setup:
	err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
end:
	if (err)
		pr_err("enable_cpucache failed for %s, error %d\n",
		       cachep->name, -err);
	return err;
}

/*
 * Drain an array if it contains any elements, taking the node lock only if
 * necessary. Note that the node listlock also protects the array_cache
 * if drain_array() is used on the shared array.
 */
static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
			 struct array_cache *ac, int node)
{
	LIST_HEAD(list);

	/* ac from n->shared can be freed if we don't hold the slab_mutex. */
	check_mutex_acquired();

	if (!ac || !ac->avail)
		return;

	if (ac->touched) {
		ac->touched = 0;
		return;
	}

	spin_lock_irq(&n->list_lock);
	drain_array_locked(cachep, ac, node, false, &list);
	spin_unlock_irq(&n->list_lock);

	slabs_destroy(cachep, &list);
}

/**
 * cache_reap - Reclaim memory from caches.
 * @w: work descriptor
 *
 * Called from workqueue/eventd every few seconds.
 * Purpose:
 * - clear the per-cpu caches for this CPU.
 * - return freeable pages to the main free memory pool.
 *
 * If we cannot acquire the cache chain mutex then just give up - we'll try
 * again on the next iteration.
 */
static void cache_reap(struct work_struct *w)
{
	struct kmem_cache *searchp;
	struct kmem_cache_node *n;
	int node = numa_mem_id();
	struct delayed_work *work = to_delayed_work(w);

	if (!mutex_trylock(&slab_mutex))
		/* Give up. Setup the next iteration. */
		goto out;

	list_for_each_entry(searchp, &slab_caches, list) {
		check_irq_on();

		/*
		 * We only take the node lock if absolutely necessary and we
		 * have established with reasonable certainty that
		 * we can do some work if the lock was obtained.
		 */
		n = get_node(searchp, node);

		reap_alien(searchp, n);

		drain_array(searchp, n, cpu_cache_get(searchp), node);

		/*
		 * These are racy checks but it does not matter
		 * if we skip one check or scan twice.
		 */
		if (time_after(n->next_reap, jiffies))
			goto next;

		n->next_reap = jiffies + REAPTIMEOUT_NODE;

		drain_array(searchp, n, n->shared, node);

		if (n->free_touched)
			n->free_touched = 0;
		else {
			int freed;

			freed = drain_freelist(searchp, n, (n->free_limit +
				5 * searchp->num - 1) / (5 * searchp->num));
			STATS_ADD_REAPED(searchp, freed);
		}
next:
		cond_resched();
	}
	check_irq_on();
	mutex_unlock(&slab_mutex);
	next_reap_node();
out:
	/* Set up the next iteration */
	schedule_delayed_work_on(smp_processor_id(), work,
				round_jiffies_relative(REAPTIMEOUT_AC));
}

void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
{
	unsigned long active_objs, num_objs, active_slabs;
	unsigned long total_slabs = 0, free_objs = 0, shared_avail = 0;
	unsigned long free_slabs = 0;
	int node;
	struct kmem_cache_node *n;

	for_each_kmem_cache_node(cachep, node, n) {
		check_irq_on();
		spin_lock_irq(&n->list_lock);

		total_slabs += n->total_slabs;
		free_slabs += n->free_slabs;
		free_objs += n->free_objects;

		if (n->shared)
			shared_avail += n->shared->avail;

		spin_unlock_irq(&n->list_lock);
	}
	num_objs = total_slabs * cachep->num;
	active_slabs = total_slabs - free_slabs;
	active_objs = num_objs - free_objs;

	sinfo->active_objs = active_objs;
	sinfo->num_objs = num_objs;
	sinfo->active_slabs = active_slabs;
	sinfo->num_slabs = total_slabs;
	sinfo->shared_avail = shared_avail;
	sinfo->limit = cachep->limit;
	sinfo->batchcount = cachep->batchcount;
	sinfo->shared = cachep->shared;
	sinfo->objects_per_slab = cachep->num;
	sinfo->cache_order = cachep->gfporder;
}

void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
{
#if STATS
	{			/* node stats */
		unsigned long high = cachep->high_mark;
		unsigned long allocs = cachep->num_allocations;
		unsigned long grown = cachep->grown;
		unsigned long reaped = cachep->reaped;
		unsigned long errors = cachep->errors;
		unsigned long max_freeable = cachep->max_freeable;
		unsigned long node_allocs = cachep->node_allocs;
		unsigned long node_frees = cachep->node_frees;
		unsigned long overflows = cachep->node_overflow;

		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu",
			   allocs, high, grown,
			   reaped, errors, max_freeable, node_allocs,
			   node_frees, overflows);
	}
	/* cpu stats */
	{
		unsigned long allochit = atomic_read(&cachep->allochit);
		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
		unsigned long freehit = atomic_read(&cachep->freehit);
		unsigned long freemiss = atomic_read(&cachep->freemiss);

		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
			   allochit, allocmiss, freehit, freemiss);
	}
#endif
}

#define MAX_SLABINFO_WRITE 128
/**
 * slabinfo_write - Tuning for the slab allocator
 * @file: unused
 * @buffer: user buffer
 * @count: data length
 * @ppos: unused
 */
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos)
{
	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
	int limit, batchcount, shared, res;
	struct kmem_cache *cachep;

	if (count > MAX_SLABINFO_WRITE)
		return -EINVAL;
	if (copy_from_user(&kbuf, buffer, count))
		return -EFAULT;
	kbuf[MAX_SLABINFO_WRITE] = '\0';

	tmp = strchr(kbuf, ' ');
	if (!tmp)
		return -EINVAL;
	*tmp = '\0';
	tmp++;
	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
		return -EINVAL;

	/* Find the cache in the chain of caches. */
	mutex_lock(&slab_mutex);
	res = -EINVAL;
	list_for_each_entry(cachep, &slab_caches, list) {
		if (!strcmp(cachep->name, kbuf)) {
			if (limit < 1 || batchcount < 1 ||
					batchcount > limit || shared < 0) {
				res = 0;
			} else {
				res = do_tune_cpucache(cachep, limit,
						       batchcount, shared,
						       GFP_KERNEL);
			}
			break;
		}
	}
	mutex_unlock(&slab_mutex);
	if (res >= 0)
		res = count;
	return res;
}
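
/*
 * Example usage (illustrative only): tuning is driven from userspace by
 * writing "<cache name> <limit> <batchcount> <shared>" to /proc/slabinfo,
 * e.g. from a root shell:
 *
 *	echo "dentry 120 60 8" > /proc/slabinfo
 *
 * which lands in slabinfo_write() above and, if the parameters pass the
 * sanity checks, in do_tune_cpucache() for the named cache.
 */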

#ifdef CONFIG_DEBUG_SLAB_LEAK

static inline int add_caller(unsigned long *n, unsigned long v)
{
	unsigned long *p;
	int l;
	if (!v)
		return 1;
	l = n[1];
	p = n + 2;
	while (l) {
		int i = l/2;
		unsigned long *q = p + 2 * i;
		if (*q == v) {
			q[1]++;
			return 1;
		}
		if (*q > v) {
			l = i;
		} else {
			p = q + 2;
			l -= i + 1;
		}
	}
	if (++n[1] == n[0])
		return 0;
	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
	p[0] = v;
	p[1] = 1;
	return 1;
}

static void handle_slab(unsigned long *n, struct kmem_cache *c,
						struct page *page)
{
	void *p;
	int i, j;
	unsigned long v;

	if (n[0] == n[1])
		return;
	for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
		bool active = true;

		for (j = page->active; j < c->num; j++) {
			if (get_free_obj(page, j) == i) {
				active = false;
				break;
			}
		}

		if (!active)
			continue;

		/*
		 * probe_kernel_read() is used for DEBUG_PAGEALLOC. The page
		 * table mapping is established only at actual object
		 * allocation, so we could otherwise mistakenly access an
		 * unmapped object in the cpu cache.
		 */
		if (probe_kernel_read(&v, dbg_userword(c, p), sizeof(v)))
			continue;

		if (!add_caller(n, v))
			return;
	}
}

static void show_symbol(struct seq_file *m, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	unsigned long offset, size;
	char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];

	if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
		if (modname[0])
			seq_printf(m, " [%s]", modname);
		return;
	}
#endif
	seq_printf(m, "%px", (void *)address);
}

static int leaks_show(struct seq_file *m, void *p)
{
	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
	struct page *page;
	struct kmem_cache_node *n;
	const char *name;
	unsigned long *x = m->private;
	int node;
	int i;

	if (!(cachep->flags & SLAB_STORE_USER))
		return 0;
	if (!(cachep->flags & SLAB_RED_ZONE))
		return 0;

	/*
	 * Set store_user_clean and start to grab stored user information
	 * for all objects on this cache. If some alloc/free requests come
	 * during the processing, the information would be wrong, so restart
	 * the whole processing.
	 */
	do {
		set_store_user_clean(cachep);
		drain_cpu_caches(cachep);

		x[1] = 0;

		for_each_kmem_cache_node(cachep, node, n) {

			check_irq_on();
			spin_lock_irq(&n->list_lock);

			list_for_each_entry(page, &n->slabs_full, lru)
				handle_slab(x, cachep, page);
			list_for_each_entry(page, &n->slabs_partial, lru)
				handle_slab(x, cachep, page);
			spin_unlock_irq(&n->list_lock);
		}
	} while (!is_store_user_clean(cachep));

	name = cachep->name;
	if (x[0] == x[1]) {
		/* Increase the buffer size */
		mutex_unlock(&slab_mutex);
		m->private = kcalloc(x[0] * 4, sizeof(unsigned long),
				     GFP_KERNEL);
		if (!m->private) {
			/* Too bad, we are really out */
			m->private = x;
			mutex_lock(&slab_mutex);
			return -ENOMEM;
		}
		*(unsigned long *)m->private = x[0] * 2;
		kfree(x);
		mutex_lock(&slab_mutex);
		/* Now make sure this entry will be retried */
		m->count = m->size;
		return 0;
	}
	for (i = 0; i < x[1]; i++) {
		seq_printf(m, "%s: %lu ", name, x[2*i+3]);
		show_symbol(m, x[2*i+2]);
		seq_putc(m, '\n');
	}

	return 0;
}

static const struct seq_operations slabstats_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = leaks_show,
};

static int slabstats_open(struct inode *inode, struct file *file)
{
	unsigned long *n;

	n = __seq_open_private(file, &slabstats_op, PAGE_SIZE);
	if (!n)
		return -ENOMEM;

	*n = PAGE_SIZE / (2 * sizeof(unsigned long));

	return 0;
}

static const struct file_operations proc_slabstats_operations = {
	.open		= slabstats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif

static int __init slab_proc_init(void)
{
#ifdef CONFIG_DEBUG_SLAB_LEAK
	proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
#endif
	return 0;
}
module_init(slab_proc_init);

#ifdef CONFIG_HARDENED_USERCOPY
/*
 * Rejects incorrectly sized objects and objects that are to be copied
 * to/from userspace but do not fall entirely within the containing slab
 * cache's usercopy region.
 *
 * A passing check returns silently; a failing check either warns (when the
 * copy still falls within the object and usercopy_fallback is enabled) or
 * calls usercopy_abort().
 */
void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
			 bool to_user)
{
	struct kmem_cache *cachep;
	unsigned int objnr;
	unsigned long offset;

	/* Find and validate object. */
	cachep = page->slab_cache;
	objnr = obj_to_index(cachep, page, (void *)ptr);
	BUG_ON(objnr >= cachep->num);

	/* Find offset within object. */
	offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);

	/* Allow address range falling entirely within usercopy region. */
	if (offset >= cachep->useroffset &&
	    offset - cachep->useroffset <= cachep->usersize &&
	    n <= cachep->useroffset - offset + cachep->usersize)
		return;

	/*
	 * If the copy is still within the allocated object, produce
	 * a warning instead of rejecting the copy. This is intended
	 * to be a temporary method to find any missing usercopy
	 * whitelists.
	 */
	if (usercopy_fallback &&
	    offset <= cachep->object_size &&
	    n <= cachep->object_size - offset) {
		usercopy_warn("SLAB object", cachep->name, to_user, offset, n);
		return;
	}

	usercopy_abort("SLAB object", cachep->name, to_user, offset, n);
}
#endif /* CONFIG_HARDENED_USERCOPY */

/**
 * ksize - get the actual amount of memory allocated for a given object
 * @objp: Pointer to the object
 *
 * kmalloc may internally round up allocations and return more memory
 * than requested. ksize() can be used to determine the actual amount of
 * memory allocated. The caller may use this additional memory, even though
 * a smaller amount of memory was initially specified with the kmalloc call.
 * The caller must guarantee that objp points to a valid object previously
 * allocated with either kmalloc() or kmem_cache_alloc(). The object
 * must not be freed during the duration of the call.
 */
size_t ksize(const void *objp)
{
	size_t size;

	BUG_ON(!objp);
	if (unlikely(objp == ZERO_SIZE_PTR))
		return 0;

	size = virt_to_cache(objp)->object_size;
	/* We assume that ksize callers could use the whole allocated area,
	 * so we need to unpoison this area.
	 */
	kasan_unpoison_shadow(objp, size);

	return size;
}
EXPORT_SYMBOL(ksize);
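
/*
 * Example usage (illustrative sketch, not part of slab.c): ksize() reports
 * the usable size of an object, which may exceed the requested size because
 * allocations are rounded up to the containing kmalloc cache.
 *
 *	char *buf = kmalloc(100, GFP_KERNEL);
 *
 *	if (buf) {
 *		size_t usable = ksize(buf);	(typically 128 here)
 *		kfree(buf);
 *	}
 */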