/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 * 	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with enabled local interrupts -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *  	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'slab_mutex'.
 *	The sem is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
 */
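
/*
 * Typical usage of the external interface (an illustrative sketch only;
 * "struct foo" and "foo_cache" are hypothetical names, not part of this
 * file): a subsystem creates one cache per object type and then allocates
 * and frees objects through that cache.
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	objp = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, objp);
 *	kmem_cache_destroy(foo_cache);
 */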

#include	<linux/slab.h>
#include	<linux/mm.h>
#include	<linux/poison.h>
#include	<linux/swap.h>
#include	<linux/cache.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
#include	<linux/cpuset.h>
#include	<linux/proc_fs.h>
#include	<linux/seq_file.h>
#include	<linux/notifier.h>
#include	<linux/kallsyms.h>
#include	<linux/cpu.h>
#include	<linux/sysctl.h>
#include	<linux/module.h>
#include	<linux/rcupdate.h>
#include	<linux/string.h>
#include	<linux/uaccess.h>
#include	<linux/nodemask.h>
#include	<linux/kmemleak.h>
#include	<linux/mempolicy.h>
#include	<linux/mutex.h>
#include	<linux/fault-inject.h>
#include	<linux/rtmutex.h>
#include	<linux/reciprocal_div.h>
#include	<linux/debugobjects.h>
#include	<linux/kmemcheck.h>
#include	<linux/memory.h>
#include	<linux/prefetch.h>

#include	<net/sock.h>

#include	<asm/cacheflush.h>
#include	<asm/tlbflush.h>
#include	<asm/page.h>

#include <trace/events/kmem.h>

#include	"internal.h"

#include	"slab.h"

/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)
#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)

#if FREELIST_BYTE_INDEX
typedef unsigned char freelist_idx_t;
#else
typedef unsigned short freelist_idx_t;
#endif

#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)

/*
 * true if a page was allocated from pfmemalloc reserves for network-based
 * swap
 */
static bool pfmemalloc_active __read_mostly;

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	void *entry[];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 *
			 * Entries should not be directly dereferenced as
			 * entries belonging to slabs marked pfmemalloc will
			 * have the lower bits set SLAB_OBJ_PFMEMALLOC
			 */
};

struct alien_cache {
	spinlock_t lock;
	struct array_cache ac;
};

#define SLAB_OBJ_PFMEMALLOC	1
static inline bool is_obj_pfmemalloc(void *objp)
{
	return (unsigned long)objp & SLAB_OBJ_PFMEMALLOC;
}

static inline void set_obj_pfmemalloc(void **objp)
{
	*objp = (void *)((unsigned long)*objp | SLAB_OBJ_PFMEMALLOC);
	return;
}

static inline void clear_obj_pfmemalloc(void **objp)
{
	*objp = (void *)((unsigned long)*objp & ~SLAB_OBJ_PFMEMALLOC);
}
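
/*
 * The flag above fits in bit 0 of the pointer because objects handed out by
 * this allocator are at least word aligned.  For example (hypothetical
 * address), an object at 0xffff880012345670 taken from a pfmemalloc slab is
 * stored in the array_cache as 0xffff880012345671, and clear_obj_pfmemalloc()
 * must run before the pointer is dereferenced.
 */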

/*
 * bootstrap: The caches do not work without cpuarrays anymore, but the
 * cpuarrays are allocated from the generic caches...
 */
#define BOOT_CPUCACHE_ENTRIES	1
struct arraycache_init {
	struct array_cache cache;
	void *entries[BOOT_CPUCACHE_ENTRIES];
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (2 * MAX_NUMNODES)
static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_NODE (MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_cache_node *n, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node, struct list_head *list);
static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
static void cache_reap(struct work_struct *unused);

static int slab_early_init = 1;

#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))

static void kmem_cache_node_init(struct kmem_cache_node *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&get_node(cachep, nodeid)->slab, listp);	\
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

#define CFLGS_OFF_SLAB		(0x80000000UL)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps means less probability for unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_AC		(2*HZ)
#define REAPTIMEOUT_NODE	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
#define	STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
#define	STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
#define	STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG

/*
 * memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 * 		the end of an object is aligned with the end of the real
 * 		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 * 		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->size - 1* BYTES_PER_WORD: last caller address
 *					[BYTES_PER_WORD long]
 */
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long long*) (objp + obj_offset(cachep) -
				      sizeof(unsigned long long));
}

static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long long *)(objp + cachep->size -
					      sizeof(unsigned long long) -
					      REDZONE_ALIGN);
	return (unsigned long long *) (objp + cachep->size -
				       sizeof(unsigned long long));
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

#define OBJECT_FREE (0)
#define OBJECT_ACTIVE (1)

#ifdef CONFIG_DEBUG_SLAB_LEAK

static void set_obj_status(struct page *page, int idx, int val)
{
	int freelist_size;
	char *status;
	struct kmem_cache *cachep = page->slab_cache;

	freelist_size = cachep->num * sizeof(freelist_idx_t);
	status = (char *)page->freelist + freelist_size;
	status[idx] = val;
}

static inline unsigned int get_obj_status(struct page *page, int idx)
{
	int freelist_size;
	char *status;
	struct kmem_cache *cachep = page->slab_cache;

	freelist_size = cachep->num * sizeof(freelist_idx_t);
	status = (char *)page->freelist + freelist_size;

	return status[idx];
}

#else
static inline void set_obj_status(struct page *page, int idx, int val) {}

#endif

/*
 * Do not go above this order unless 0 objects fit into the slab or
 * overridden on the command line.
 */
#define	SLAB_MAX_ORDER_HI	1
#define	SLAB_MAX_ORDER_LO	0
static int slab_max_order = SLAB_MAX_ORDER_LO;
static bool slab_max_order_set __initdata;

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_head_page(obj);
	return page->slab_cache;
}

static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
				 unsigned int idx)
{
	return page->s_mem + cache->size * idx;
}

/*
 * We want to avoid an expensive divide : (offset / cache->size)
 *   Using the fact that size is a constant for a particular cache,
 *   we can replace (offset / cache->size) by
 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct page *page, void *obj)
{
	u32 offset = (obj - page->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
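
/*
 * A worked example of the optimisation above (numbers are illustrative):
 * reciprocal_buffer_size is set up elsewhere with reciprocal_value(size), so
 * for a cache with size == 256, an object 1024 bytes past page->s_mem gives
 * reciprocal_divide(1024, cache->reciprocal_buffer_size) == 4, the same
 * result as 1024 / 256 but computed with a multiply and shift instead of a
 * hardware divide.
 */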

/* internal cache of cache description objs */
static struct kmem_cache kmem_cache_boot = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.size = sizeof(struct kmem_cache),
	.name = "kmem_cache",
};

#define BAD_ALIEN_MAGIC 0x01020304ul

static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return this_cpu_ptr(cachep->cpu_cache);
}

static size_t calculate_freelist_size(int nr_objs, size_t align)
{
	size_t freelist_size;

	freelist_size = nr_objs * sizeof(freelist_idx_t);
	if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
		freelist_size += nr_objs * sizeof(char);

	if (align)
		freelist_size = ALIGN(freelist_size, align);

	return freelist_size;
}

static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
				size_t idx_size, size_t align)
{
	int nr_objs;
	size_t remained_size;
	size_t freelist_size;
	int extra_space = 0;

	if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
		extra_space = sizeof(char);
	/*
	 * Ignore padding for the initial guess. The padding
	 * is at most @align-1 bytes, and @buffer_size is at
	 * least @align. In the worst case, this result will
	 * be one greater than the number of objects that fit
	 * into the memory allocation when taking the padding
	 * into account.
	 */
	nr_objs = slab_size / (buffer_size + idx_size + extra_space);

	/*
	 * This calculated number will be either the right
	 * amount, or one greater than what we want.
	 */
	remained_size = slab_size - nr_objs * buffer_size;
	freelist_size = calculate_freelist_size(nr_objs, align);
	if (remained_size < freelist_size)
		nr_objs--;

	return nr_objs;
}

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static void cache_estimate(unsigned long gfporder, size_t buffer_size,
			   size_t align, int flags, size_t *left_over,
			   unsigned int *num)
{
	int nr_objs;
	size_t mgmt_size;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - One unsigned int for each object
	 * - Padding to respect alignment of @align
	 * - @buffer_size bytes for each object
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & CFLGS_OFF_SLAB) {
		mgmt_size = 0;
		nr_objs = slab_size / buffer_size;

	} else {
		nr_objs = calculate_nr_objs(slab_size, buffer_size,
					sizeof(freelist_idx_t), align);
		mgmt_size = calculate_freelist_size(nr_objs, align);
	}
	*num = nr_objs;
	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
}
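
/*
 * Worked example (illustrative numbers only): for an order-0 slab of 4096
 * bytes, 256-byte objects and a two-byte freelist_idx_t kept on-slab, the
 * initial guess is 4096 / 258 = 15 objects; the 30-byte freelist fits in the
 * 4096 - 15*256 = 256 remaining bytes, so cache_estimate() reports 15 objects
 * and 4096 - 3840 - 30 = 226 left-over bytes (before any alignment padding).
 */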

#if DEBUG
#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
{
	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
#endif

/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line.
 */

static int use_alien_caches __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
	use_alien_caches = 0;
	return 1;
}
__setup("noaliencache", noaliencache_setup);

static int __init slab_max_order_setup(char *str)
{
	get_option(&str, &slab_max_order);
	slab_max_order = slab_max_order < 0 ? 0 :
				min(slab_max_order, MAX_ORDER - 1);
	slab_max_order_set = true;

	return 1;
}
__setup("slab_max_order=", slab_max_order_setup);

#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, slab_reap_node);

static void init_reap_node(int cpu)
{
	int node;

	node = next_node(cpu_to_mem(cpu), node_online_map);
	if (node == MAX_NUMNODES)
		node = first_node(node_online_map);

	per_cpu(slab_reap_node, cpu) = node;
}

static void next_reap_node(void)
{
	int node = __this_cpu_read(slab_reap_node);

	node = next_node(node, node_online_map);
	if (unlikely(node >= MAX_NUMNODES))
		node = first_node(node_online_map);
	__this_cpu_write(slab_reap_node, node);
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);

	/*
	 * When this gets called from do_initcalls via cpucache_init(),
	 * init_workqueues() has already run, so keventd will be setup
	 * at that time.
	 */
	if (keventd_up() && reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}

static void init_arraycache(struct array_cache *ac, int limit, int batch)
{
	/*
	 * The array_cache structures contain pointers to free objects.
	 * However, when such objects are allocated or transferred to another
	 * cache the pointers are not cleared and they could be counted as
	 * valid references during a kmemleak scan. Therefore, kmemleak must
	 * not scan such objects.
	 */
	kmemleak_no_scan(ac);
	if (ac) {
		ac->avail = 0;
		ac->limit = limit;
		ac->batchcount = batch;
		ac->touched = 0;
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *ac = NULL;

	ac = kmalloc_node(memsize, gfp, node);
	init_arraycache(ac, entries, batchcount);
	return ac;
}

static inline bool is_slab_pfmemalloc(struct page *page)
{
	return PageSlabPfmemalloc(page);
}

/* Clears pfmemalloc_active if no slabs have pfmemalloc set */
static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
						struct array_cache *ac)
{
	struct kmem_cache_node *n = get_node(cachep, numa_mem_id());
	struct page *page;
	unsigned long flags;

	if (!pfmemalloc_active)
		return;

	spin_lock_irqsave(&n->list_lock, flags);
	list_for_each_entry(page, &n->slabs_full, lru)
		if (is_slab_pfmemalloc(page))
			goto out;

	list_for_each_entry(page, &n->slabs_partial, lru)
		if (is_slab_pfmemalloc(page))
			goto out;

	list_for_each_entry(page, &n->slabs_free, lru)
		if (is_slab_pfmemalloc(page))
			goto out;

	pfmemalloc_active = false;
out:
	spin_unlock_irqrestore(&n->list_lock, flags);
}

static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
						gfp_t flags, bool force_refill)
{
	int i;
	void *objp = ac->entry[--ac->avail];

	/* Ensure the caller is allowed to use objects from PFMEMALLOC slab */
	if (unlikely(is_obj_pfmemalloc(objp))) {
		struct kmem_cache_node *n;

		if (gfp_pfmemalloc_allowed(flags)) {
			clear_obj_pfmemalloc(&objp);
			return objp;
		}

		/* The caller cannot use PFMEMALLOC objects, find another one */
		for (i = 0; i < ac->avail; i++) {
			/* If a !PFMEMALLOC object is found, swap them */
			if (!is_obj_pfmemalloc(ac->entry[i])) {
				objp = ac->entry[i];
				ac->entry[i] = ac->entry[ac->avail];
				ac->entry[ac->avail] = objp;
				return objp;
			}
		}

		/*
		 * If there are empty slabs on the slabs_free list and we are
		 * being forced to refill the cache, mark this one !pfmemalloc.
		 */
		n = get_node(cachep, numa_mem_id());
		if (!list_empty(&n->slabs_free) && force_refill) {
			struct page *page = virt_to_head_page(objp);
			ClearPageSlabPfmemalloc(page);
			clear_obj_pfmemalloc(&objp);
			recheck_pfmemalloc_active(cachep, ac);
			return objp;
		}

		/* No !PFMEMALLOC objects available */
		ac->avail++;
		objp = NULL;
	}

	return objp;
}

static inline void *ac_get_obj(struct kmem_cache *cachep,
			struct array_cache *ac, gfp_t flags, bool force_refill)
{
	void *objp;

	if (unlikely(sk_memalloc_socks()))
		objp = __ac_get_obj(cachep, ac, flags, force_refill);
	else
		objp = ac->entry[--ac->avail];

	return objp;
}

static noinline void *__ac_put_obj(struct kmem_cache *cachep,
			struct array_cache *ac, void *objp)
{
	if (unlikely(pfmemalloc_active)) {
		/* Some pfmemalloc slabs exist, check if this is one */
		struct page *page = virt_to_head_page(objp);
		if (PageSlabPfmemalloc(page))
			set_obj_pfmemalloc(&objp);
	}

	return objp;
}

static inline void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
								void *objp)
{
	if (unlikely(sk_memalloc_socks()))
		objp = __ac_put_obj(cachep, ac, objp);

	ac->entry[ac->avail++] = objp;
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min3(from->avail, max, to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail -nr,
			sizeof(void *) *nr);

	from->avail -= nr;
	to->avail += nr;
	return nr;
}

#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, n) do { } while (0)

static inline struct alien_cache **alloc_alien_cache(int node,
						int limit, gfp_t gfp)
{
	return (struct alien_cache **)BAD_ALIEN_MAGIC;
}

static inline void free_alien_cache(struct alien_cache **ac_ptr)
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

static inline void *____cache_alloc_node(struct kmem_cache *cachep,
		 gfp_t flags, int nodeid)
{
	return NULL;
}

static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return flags;
}

#else	/* CONFIG_NUMA */

static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

static struct alien_cache *__alloc_alien_cache(int node, int entries,
						int batch, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
	struct alien_cache *alc = NULL;

	alc = kmalloc_node(memsize, gfp, node);
	init_arraycache(&alc->ac, entries, batch);
	spin_lock_init(&alc->lock);
	return alc;
}

static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
	struct alien_cache **alc_ptr;
	size_t memsize = sizeof(void *) * nr_node_ids;
	int i;

	if (limit > 1)
		limit = 12;
	alc_ptr = kzalloc_node(memsize, gfp, node);
	if (!alc_ptr)
		return NULL;

	for_each_node(i) {
		if (i == node || !node_online(i))
			continue;
		alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
		if (!alc_ptr[i]) {
			for (i--; i >= 0; i--)
				kfree(alc_ptr[i]);
			kfree(alc_ptr);
			return NULL;
		}
	}
	return alc_ptr;
}

static void free_alien_cache(struct alien_cache **alc_ptr)
{
	int i;

	if (!alc_ptr)
		return;
	for_each_node(i)
	    kfree(alc_ptr[i]);
	kfree(alc_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node,
				struct list_head *list)
{
	struct kmem_cache_node *n = get_node(cachep, node);

	if (ac->avail) {
		spin_lock(&n->list_lock);
		/*
		 * Stuff objects into the remote nodes shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		if (n->shared)
			transfer_objects(n->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node, list);
		ac->avail = 0;
		spin_unlock(&n->list_lock);
	}
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
{
	int node = __this_cpu_read(slab_reap_node);

	if (n->alien) {
		struct alien_cache *alc = n->alien[node];
		struct array_cache *ac;

		if (alc) {
			ac = &alc->ac;
			if (ac->avail && spin_trylock_irq(&alc->lock)) {
				LIST_HEAD(list);

				__drain_alien_cache(cachep, ac, node, &list);
				spin_unlock_irq(&alc->lock);
				slabs_destroy(cachep, &list);
			}
		}
	}
}

static void drain_alien_cache(struct kmem_cache *cachep,
				struct alien_cache **alien)
{
	int i = 0;
	struct alien_cache *alc;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		alc = alien[i];
		if (alc) {
			LIST_HEAD(list);

			ac = &alc->ac;
			spin_lock_irqsave(&alc->lock, flags);
			__drain_alien_cache(cachep, ac, i, &list);
			spin_unlock_irqrestore(&alc->lock, flags);
			slabs_destroy(cachep, &list);
		}
	}
}

static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
				int node, int page_node)
{
	struct kmem_cache_node *n;
	struct alien_cache *alien = NULL;
	struct array_cache *ac;
	LIST_HEAD(list);

	n = get_node(cachep, node);
	STATS_INC_NODEFREES(cachep);
	if (n->alien && n->alien[page_node]) {
		alien = n->alien[page_node];
		ac = &alien->ac;
		spin_lock(&alien->lock);
		if (unlikely(ac->avail == ac->limit)) {
			STATS_INC_ACOVERFLOW(cachep);
			__drain_alien_cache(cachep, ac, page_node, &list);
		}
		ac_put_obj(cachep, ac, objp);
		spin_unlock(&alien->lock);
		slabs_destroy(cachep, &list);
	} else {
		n = get_node(cachep, page_node);
		spin_lock(&n->list_lock);
		free_block(cachep, &objp, 1, page_node, &list);
		spin_unlock(&n->list_lock);
		slabs_destroy(cachep, &list);
	}
	return 1;
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	int page_node = page_to_nid(virt_to_page(objp));
	int node = numa_mem_id();
	/*
	 * Make sure we are not freeing an object from another node to the array
	 * cache on this cpu.
	 */
	if (likely(node == page_node))
		return 0;

	return __cache_free_alien(cachep, objp, node, page_node);
}

/*
 * Construct gfp mask to allocate from a specific node but do not invoke reclaim
 * or warn about failures.
 */
static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~__GFP_WAIT;
}
#endif

/*
 * Allocates and initializes a kmem_cache_node for a node on each slab cache,
 * used for either memory or cpu hotplug.  If memory is being hot-added, the
 * kmem_cache_node will be allocated off-node since memory is not yet online
 * for the new node.  When hotplugging memory or a cpu, existing nodes are not
 * replaced if already in use.
 *
 * Must hold slab_mutex.
 */
static int init_cache_node_node(int node)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *n;
	const size_t memsize = sizeof(struct kmem_cache_node);

	list_for_each_entry(cachep, &slab_caches, list) {
		/*
		 * Set up the kmem_cache_node for cpu before we can
		 * begin anything. Make sure some other cpu on this
		 * node has not already allocated this
		 */
		n = get_node(cachep, node);
		if (!n) {
			n = kmalloc_node(memsize, GFP_KERNEL, node);
			if (!n)
				return -ENOMEM;
			kmem_cache_node_init(n);
			n->next_reap = jiffies + REAPTIMEOUT_NODE +
			    ((unsigned long)cachep) % REAPTIMEOUT_NODE;

			/*
			 * The kmem_cache_nodes don't come and go as CPUs
			 * come and go.  slab_mutex is sufficient
			 * protection here.
			 */
			cachep->node[node] = n;
		}

		spin_lock_irq(&n->list_lock);
		n->free_limit =
			(1 + nr_cpus_node(node)) *
			cachep->batchcount + cachep->num;
		spin_unlock_irq(&n->list_lock);
	}
	return 0;
}

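/*
 * Ceiling division (illustrative numbers): with 10 free objects in a cache
 * that packs 4 objects per slab, this asks drain_freelist() for
 * (10 + 4 - 1) / 4 = 3 slabs, enough to cover every free object.
 */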
static inline int slabs_tofree(struct kmem_cache *cachep,
						struct kmem_cache_node *n)
{
	return (n->free_objects + cachep->num - 1) / cachep->num;
}

static void cpuup_canceled(long cpu)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *n = NULL;
	int node = cpu_to_mem(cpu);
	const struct cpumask *mask = cpumask_of_node(node);

	list_for_each_entry(cachep, &slab_caches, list) {
		struct array_cache *nc;
		struct array_cache *shared;
		struct alien_cache **alien;
		LIST_HEAD(list);

		n = get_node(cachep, node);
		if (!n)
			continue;

		spin_lock_irq(&n->list_lock);

		/* Free limit for this kmem_cache_node */
		n->free_limit -= cachep->batchcount;

		/* cpu is dead; no one can alloc from it. */
		nc = per_cpu_ptr(cachep->cpu_cache, cpu);
		if (nc) {
			free_block(cachep, nc->entry, nc->avail, node, &list);
			nc->avail = 0;
		}

		if (!cpumask_empty(mask)) {
			spin_unlock_irq(&n->list_lock);
			goto free_slab;
		}

		shared = n->shared;
		if (shared) {
			free_block(cachep, shared->entry,
				   shared->avail, node, &list);
			n->shared = NULL;
		}

		alien = n->alien;
		n->alien = NULL;

		spin_unlock_irq(&n->list_lock);

		kfree(shared);
		if (alien) {
			drain_alien_cache(cachep, alien);
			free_alien_cache(alien);
		}

free_slab:
		slabs_destroy(cachep, &list);
	}
	/*
	 * In the previous loop, all the objects were freed to
	 * the respective cache's slabs,  now we can go ahead and
	 * shrink each nodelist to its limit.
	 */
	list_for_each_entry(cachep, &slab_caches, list) {
		n = get_node(cachep, node);
		if (!n)
			continue;
		drain_freelist(cachep, n, slabs_tofree(cachep, n));
	}
}

static int cpuup_prepare(long cpu)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *n = NULL;
	int node = cpu_to_mem(cpu);
	int err;

	/*
	 * We need to do this right in the beginning since
	 * alloc_arraycache's are going to use this list.
	 * kmalloc_node allows us to add the slab to the right
	 * kmem_cache_node and not this cpu's kmem_cache_node
	 */
	err = init_cache_node_node(node);
	if (err < 0)
		goto bad;

	/*
	 * Now we can go ahead with allocating the shared arrays and
	 * array caches
	 */
	list_for_each_entry(cachep, &slab_caches, list) {
		struct array_cache *shared = NULL;
		struct alien_cache **alien = NULL;

		if (cachep->shared) {
			shared = alloc_arraycache(node,
				cachep->shared * cachep->batchcount,
				0xbaadf00d, GFP_KERNEL);
			if (!shared)
				goto bad;
		}
		if (use_alien_caches) {
			alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL);
			if (!alien) {
				kfree(shared);
				goto bad;
			}
		}
		n = get_node(cachep, node);
		BUG_ON(!n);

		spin_lock_irq(&n->list_lock);
		if (!n->shared) {
			/*
			 * We are serialised from CPU_DEAD or
			 * CPU_UP_CANCELLED by the cpucontrol lock
			 */
			n->shared = shared;
			shared = NULL;
		}
#ifdef CONFIG_NUMA
		if (!n->alien) {
			n->alien = alien;
			alien = NULL;
		}
#endif
		spin_unlock_irq(&n->list_lock);
		kfree(shared);
		free_alien_cache(alien);
	}

	return 0;
bad:
	cpuup_canceled(cpu);
	return -ENOMEM;
}

static int cpuup_callback(struct notifier_block *nfb,
				    unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	int err = 0;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		mutex_lock(&slab_mutex);
		err = cpuup_prepare(cpu);
		mutex_unlock(&slab_mutex);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		start_cpu_timer(cpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		/*
		 * Shutdown cache reaper. Note that the slab_mutex is
		 * held so that if cache_reap() is invoked it cannot do
		 * anything expensive but will only modify reap_work
		 * and reschedule the timer.
		*/
		cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
		/* Now the cache_reaper is guaranteed to be not running. */
		per_cpu(slab_reap_work, cpu).work.func = NULL;
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		start_cpu_timer(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/*
		 * Even if all the cpus of a node are down, we don't free the
		 * kmem_cache_node of any cache. This to avoid a race between
		 * cpu_down, and a kmalloc allocation from another cpu for
		 * memory from the node of the cpu going down.  The node
		 * structure is usually allocated from kmem_cache_create() and
		 * gets destroyed at kmem_cache_destroy().
		 */
		/* fall through */
#endif
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		mutex_lock(&slab_mutex);
		cpuup_canceled(cpu);
		mutex_unlock(&slab_mutex);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block cpucache_notifier = {
	&cpuup_callback, NULL, 0
};

#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
/*
 * Drains freelist for a node on each slab cache, used for memory hot-remove.
 * Returns -EBUSY if all objects cannot be drained so that the node is not
 * removed.
 *
 * Must hold slab_mutex.
 */
static int __meminit drain_cache_node_node(int node)
{
	struct kmem_cache *cachep;
	int ret = 0;

	list_for_each_entry(cachep, &slab_caches, list) {
		struct kmem_cache_node *n;

		n = get_node(cachep, node);
		if (!n)
			continue;

		drain_freelist(cachep, n, slabs_tofree(cachep, n));

		if (!list_empty(&n->slabs_full) ||
		    !list_empty(&n->slabs_partial)) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}

static int __meminit slab_memory_callback(struct notifier_block *self,
					unsigned long action, void *arg)
{
	struct memory_notify *mnb = arg;
	int ret = 0;
	int nid;

	nid = mnb->status_change_nid;
	if (nid < 0)
		goto out;

	switch (action) {
	case MEM_GOING_ONLINE:
		mutex_lock(&slab_mutex);
		ret = init_cache_node_node(nid);
		mutex_unlock(&slab_mutex);
		break;
	case MEM_GOING_OFFLINE:
		mutex_lock(&slab_mutex);
		ret = drain_cache_node_node(nid);
		mutex_unlock(&slab_mutex);
		break;
	case MEM_ONLINE:
	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}
out:
	return notifier_from_errno(ret);
}
#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */

/*
 * swap the static kmem_cache_node with kmalloced memory
 */
static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
				int nodeid)
{
	struct kmem_cache_node *ptr;

	ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
	BUG_ON(!ptr);

	memcpy(ptr, list, sizeof(struct kmem_cache_node));
	/*
	 * Do not assume that spinlocks can be initialized via memcpy:
	 */
	spin_lock_init(&ptr->list_lock);

	MAKE_ALL_LISTS(cachep, ptr, nodeid);
	cachep->node[nodeid] = ptr;
}

/*
 * For setting up all the kmem_cache_node for caches whose buffer_size is the
 * same as the size of kmem_cache_node.
 */
static void __init set_up_node(struct kmem_cache *cachep, int index)
{
	int node;

	for_each_online_node(node) {
		cachep->node[node] = &init_kmem_cache_node[index + node];
		cachep->node[node]->next_reap = jiffies +
		    REAPTIMEOUT_NODE +
		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
	}
}

/*
 * Initialisation.  Called after the page allocator has been initialised and
 * before smp_init().
 */
void __init kmem_cache_init(void)
{
	int i;

	BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
					sizeof(struct rcu_head));
	kmem_cache = &kmem_cache_boot;

	if (num_possible_nodes() == 1)
		use_alien_caches = 0;

	for (i = 0; i < NUM_INIT_LISTS; i++)
		kmem_cache_node_init(&init_kmem_cache_node[i]);

	/*
	 * Fragmentation resistance on low memory - only use bigger
	 * page orders on machines with more than 32MB of memory if
	 * not overridden on the command line.
	 */
	if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
		slab_max_order = SLAB_MAX_ORDER_HI;

	/* Bootstrap is tricky, because several objects are allocated
	 * from caches that do not exist yet:
	 * 1) initialize the kmem_cache cache: it contains the struct
	 *    kmem_cache structures of all caches, except kmem_cache itself:
	 *    kmem_cache is statically allocated.
	 *    Initially an __init data area is used for the head array and the
	 *    kmem_cache_node structures, it's replaced with a kmalloc allocated
	 *    array at the end of the bootstrap.
	 * 2) Create the first kmalloc cache.
	 *    The struct kmem_cache for the new cache is allocated normally.
	 *    An __init data area is used for the head array.
	 * 3) Create the remaining kmalloc caches, with minimally sized
	 *    head arrays.
	 * 4) Replace the __init data head arrays for kmem_cache and the first
	 *    kmalloc cache with kmalloc allocated arrays.
	 * 5) Replace the __init data for kmem_cache_node for kmem_cache and
	 *    the other caches with kmalloc allocated memory.
	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
	 */

	/* 1) create the kmem_cache */

	/*
	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
	 */
	create_boot_cache(kmem_cache, "kmem_cache",
		offsetof(struct kmem_cache, node) +
				  nr_node_ids * sizeof(struct kmem_cache_node *),
				  SLAB_HWCACHE_ALIGN);
	list_add(&kmem_cache->list, &slab_caches);
	slab_state = PARTIAL;

	/*
	 * Initialize the caches that provide memory for the  kmem_cache_node
	 * structures first.  Without this, further allocations will bug.
	 */
	kmalloc_caches[INDEX_NODE] = create_kmalloc_cache("kmalloc-node",
				kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
	slab_state = PARTIAL_NODE;

	slab_early_init = 0;

	/* 5) Replace the bootstrap kmem_cache_node */
	{
		int nid;

		for_each_online_node(nid) {
			init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);

			init_list(kmalloc_caches[INDEX_NODE],
					  &init_kmem_cache_node[SIZE_NODE + nid], nid);
		}
	}

	create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
}

void __init kmem_cache_init_late(void)
{
	struct kmem_cache *cachep;

1479
	slab_state = UP;
P
Peter Zijlstra 已提交
1480

1481
	/* 6) resize the head arrays to their final sizes */
1482 1483
	mutex_lock(&slab_mutex);
	list_for_each_entry(cachep, &slab_caches, list)
1484 1485
		if (enable_cpucache(cachep, GFP_NOWAIT))
			BUG();
1486
	mutex_unlock(&slab_mutex);
1487

1488 1489 1490
	/* Done! */
	slab_state = FULL;

A
Andrew Morton 已提交
1491 1492 1493
	/*
	 * Register a cpu startup notifier callback that initializes
	 * cpu_cache_get for all new cpus
L
Linus Torvalds 已提交
1494 1495 1496
	 */
	register_cpu_notifier(&cpucache_notifier);

1497 1498 1499
#ifdef CONFIG_NUMA
	/*
	 * Register a memory hotplug callback that initializes and frees
1500
	 * node.
1501 1502 1503 1504
	 */
	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
#endif

A
Andrew Morton 已提交
1505 1506 1507
	/*
	 * The reap timers are started later, with a module init call: That part
	 * of the kernel is not yet operational.
L
Linus Torvalds 已提交
1508 1509 1510 1511 1512 1513 1514
	 */
}

static int __init cpucache_init(void)
{
	int cpu;

A
Andrew Morton 已提交
1515 1516
	/*
	 * Register the timers that return unneeded pages to the page allocator
L
Linus Torvalds 已提交
1517
	 */
1518
	for_each_online_cpu(cpu)
A
Andrew Morton 已提交
1519
		start_cpu_timer(cpu);
1520 1521

	/* Done! */
1522
	slab_state = FULL;
L
Linus Torvalds 已提交
1523 1524 1525 1526
	return 0;
}
__initcall(cpucache_init);

1527 1528 1529
static noinline void
slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
{
1530
#if DEBUG
1531
	struct kmem_cache_node *n;
1532
	struct page *page;
1533 1534
	unsigned long flags;
	int node;
1535 1536 1537 1538 1539
	static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs))
		return;
1540 1541 1542 1543 1544

	printk(KERN_WARNING
		"SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n",
		nodeid, gfpflags);
	printk(KERN_WARNING "  cache: %s, object size: %d, order: %d\n",
1545
		cachep->name, cachep->size, cachep->gfporder);
1546

1547
	for_each_kmem_cache_node(cachep, node, n) {
1548 1549 1550
		unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
		unsigned long active_slabs = 0, num_slabs = 0;

1551
		spin_lock_irqsave(&n->list_lock, flags);
1552
		list_for_each_entry(page, &n->slabs_full, lru) {
1553 1554 1555
			active_objs += cachep->num;
			active_slabs++;
		}
1556 1557
		list_for_each_entry(page, &n->slabs_partial, lru) {
			active_objs += page->active;
1558 1559
			active_slabs++;
		}
1560
		list_for_each_entry(page, &n->slabs_free, lru)
1561 1562
			num_slabs++;

1563 1564
		free_objects += n->free_objects;
		spin_unlock_irqrestore(&n->list_lock, flags);
1565 1566 1567 1568 1569 1570 1571 1572

		num_slabs += active_slabs;
		num_objs = num_slabs * cachep->num;
		printk(KERN_WARNING
			"  node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
			node, active_slabs, num_slabs, active_objs, num_objs,
			free_objects);
	}
1573
#endif
1574 1575
}

L
Linus Torvalds 已提交
1576
/*
W
Wang Sheng-Hui 已提交
1577 1578
 * Interface to system's page allocator. No need to hold the
 * kmem_cache_node ->list_lock.
L
Linus Torvalds 已提交
1579 1580 1581 1582 1583
 *
 * If we requested dmaable memory, we will get it. Even if we
 * did not request dmaable memory, we might get it, but that
 * would be relatively rare and ignorable.
 */
1584 1585
static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
								int nodeid)
L
Linus Torvalds 已提交
1586 1587
{
	struct page *page;
1588
	int nr_pages;
1589

1590
	flags |= cachep->allocflags;
1591 1592
	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		flags |= __GFP_RECLAIMABLE;
1593

1594 1595 1596
	if (memcg_charge_slab(cachep, flags, cachep->gfporder))
		return NULL;

L
Linus Torvalds 已提交
1597
	page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
1598
	if (!page) {
1599
		memcg_uncharge_slab(cachep, cachep->gfporder);
1600
		slab_out_of_memory(cachep, flags, nodeid);
L
Linus Torvalds 已提交
1601
		return NULL;
1602
	}
L
Linus Torvalds 已提交
1603

1604
	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
1605 1606 1607
	if (unlikely(page->pfmemalloc))
		pfmemalloc_active = true;

1608
	nr_pages = (1 << cachep->gfporder);
L
Linus Torvalds 已提交
1609
	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1610 1611 1612 1613 1614
		add_zone_page_state(page_zone(page),
			NR_SLAB_RECLAIMABLE, nr_pages);
	else
		add_zone_page_state(page_zone(page),
			NR_SLAB_UNRECLAIMABLE, nr_pages);
1615 1616 1617
	__SetPageSlab(page);
	if (page->pfmemalloc)
		SetPageSlabPfmemalloc(page);
1618

1619 1620 1621 1622 1623 1624 1625 1626
	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);

		if (cachep->ctor)
			kmemcheck_mark_uninitialized_pages(page, nr_pages);
		else
			kmemcheck_mark_unallocated_pages(page, nr_pages);
	}
P
Pekka Enberg 已提交
1627

1628
	return page;
L
Linus Torvalds 已提交
1629 1630 1631 1632 1633
}

/*
 * Interface to system's page release.
 */
1634
static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
L
Linus Torvalds 已提交
1635
{
1636
	const unsigned long nr_freed = (1 << cachep->gfporder);
L
Linus Torvalds 已提交
1637

1638
	kmemcheck_free_shadow(page, cachep->gfporder);
P
Pekka Enberg 已提交
1639

1640 1641 1642 1643 1644 1645
	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		sub_zone_page_state(page_zone(page),
				NR_SLAB_RECLAIMABLE, nr_freed);
	else
		sub_zone_page_state(page_zone(page),
				NR_SLAB_UNRECLAIMABLE, nr_freed);
J
Joonsoo Kim 已提交
1646

1647
	BUG_ON(!PageSlab(page));
J
Joonsoo Kim 已提交
1648
	__ClearPageSlabPfmemalloc(page);
1649
	__ClearPageSlab(page);
1650 1651
	page_mapcount_reset(page);
	page->mapping = NULL;
G
Glauber Costa 已提交
1652

L
Linus Torvalds 已提交
1653 1654
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += nr_freed;
1655 1656
	__free_pages(page, cachep->gfporder);
	memcg_uncharge_slab(cachep, cachep->gfporder);
L
Linus Torvalds 已提交
1657 1658 1659 1660
}

static void kmem_rcu_free(struct rcu_head *head)
{
1661 1662
	struct kmem_cache *cachep;
	struct page *page;
L
Linus Torvalds 已提交
1663

1664 1665 1666 1667
	page = container_of(head, struct page, rcu_head);
	cachep = page->slab_cache;

	kmem_freepages(cachep, page);
L
Linus Torvalds 已提交
1668 1669 1670 1671 1672
}

#if DEBUG

#ifdef CONFIG_DEBUG_PAGEALLOC
1673
static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
P
Pekka Enberg 已提交
1674
			    unsigned long caller)
L
Linus Torvalds 已提交
1675
{
1676
	int size = cachep->object_size;
L
Linus Torvalds 已提交
1677

1678
	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
L
Linus Torvalds 已提交
1679

P
Pekka Enberg 已提交
1680
	if (size < 5 * sizeof(unsigned long))
L
Linus Torvalds 已提交
1681 1682
		return;

P
Pekka Enberg 已提交
1683 1684 1685 1686
	*addr++ = 0x12345678;
	*addr++ = caller;
	*addr++ = smp_processor_id();
	size -= 3 * sizeof(unsigned long);
L
Linus Torvalds 已提交
1687 1688 1689 1690 1691 1692 1693
	{
		unsigned long *sptr = &caller;
		unsigned long svalue;

		while (!kstack_end(sptr)) {
			svalue = *sptr++;
			if (kernel_text_address(svalue)) {
P
Pekka Enberg 已提交
1694
				*addr++ = svalue;
L
Linus Torvalds 已提交
1695 1696 1697 1698 1699 1700 1701
				size -= sizeof(unsigned long);
				if (size <= sizeof(unsigned long))
					break;
			}
		}

	}
P
Pekka Enberg 已提交
1702
	*addr++ = 0x87654321;
L
Linus Torvalds 已提交
1703 1704 1705
}
#endif

static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
{
	int size = cachep->object_size;
	addr = &((char *)addr)[obj_offset(cachep)];

	memset(addr, val, size);
	*(unsigned char *)(addr + size - 1) = POISON_END;
}

static void dump_line(char *data, int offset, int limit)
{
	int i;
	unsigned char error = 0;
	int bad_count = 0;

	printk(KERN_ERR "%03x: ", offset);
	for (i = 0; i < limit; i++) {
		if (data[offset + i] != POISON_FREE) {
			error = data[offset + i];
			bad_count++;
		}
	}
	print_hex_dump(KERN_CONT, "", 0, 16, 1,
			&data[offset], limit, 1);

	if (bad_count == 1) {
		/*
		 * Exactly one byte differs from POISON_FREE: if XORing it
		 * with POISON_FREE leaves a power of two, only a single bit
		 * flipped, which points at bad RAM rather than a software
		 * overwrite.
		 */
		error ^= POISON_FREE;
		if (!(error & (error - 1))) {
			printk(KERN_ERR "Single bit error detected. Probably "
					"bad RAM.\n");
#ifdef CONFIG_X86
			printk(KERN_ERR "Run memtest86+ or a similar memory "
					"test tool.\n");
#else
			printk(KERN_ERR "Run a memory test tool.\n");
#endif
		}
	}
}
#endif

#if DEBUG

static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
{
	int i, size;
	char *realobj;

	if (cachep->flags & SLAB_RED_ZONE) {
		printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
			*dbg_redzone1(cachep, objp),
			*dbg_redzone2(cachep, objp));
	}

	if (cachep->flags & SLAB_STORE_USER) {
		printk(KERN_ERR "Last user: [<%p>](%pSR)\n",
		       *dbg_userword(cachep, objp),
		       *dbg_userword(cachep, objp));
	}
	realobj = (char *)objp + obj_offset(cachep);
	size = cachep->object_size;
	for (i = 0; i < size && lines; i += 16, lines--) {
		int limit;
		limit = 16;
		if (i + limit > size)
			limit = size - i;
		dump_line(realobj, i, limit);
	}
}

static void check_poison_obj(struct kmem_cache *cachep, void *objp)
{
	char *realobj;
	int size, i;
	int lines = 0;

	realobj = (char *)objp + obj_offset(cachep);
	size = cachep->object_size;

	for (i = 0; i < size; i++) {
		char exp = POISON_FREE;
		if (i == size - 1)
			exp = POISON_END;
		if (realobj[i] != exp) {
			int limit;
			/* Mismatch ! */
			/* Print header */
			if (lines == 0) {
				printk(KERN_ERR
					"Slab corruption (%s): %s start=%p, len=%d\n",
					print_tainted(), cachep->name, realobj, size);
				print_objinfo(cachep, objp, 0);
			}
			/* Hexdump the affected line */
			i = (i / 16) * 16;
			limit = 16;
			if (i + limit > size)
				limit = size - i;
			dump_line(realobj, i, limit);
			i += 16;
			lines++;
			/* Limit to 5 lines */
			if (lines > 5)
				break;
		}
	}
	if (lines != 0) {
		/* Print some data about the neighboring objects, if they
		 * exist:
		 */
		struct page *page = virt_to_head_page(objp);
		unsigned int objnr;

		objnr = obj_to_index(cachep, page, objp);
		if (objnr) {
			objp = index_to_obj(cachep, page, objnr - 1);
			realobj = (char *)objp + obj_offset(cachep);
			printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
			       realobj, size);
			print_objinfo(cachep, objp, 2);
		}
		if (objnr + 1 < cachep->num) {
			objp = index_to_obj(cachep, page, objnr + 1);
			realobj = (char *)objp + obj_offset(cachep);
			printk(KERN_ERR "Next obj: start=%p, len=%d\n",
			       realobj, size);
			print_objinfo(cachep, objp, 2);
		}
	}
}
#endif

#if DEBUG
static void slab_destroy_debugcheck(struct kmem_cache *cachep,
						struct page *page)
{
	int i;
	for (i = 0; i < cachep->num; i++) {
		void *objp = index_to_obj(cachep, page, i);

		if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
			if (cachep->size % PAGE_SIZE == 0 &&
					OFF_SLAB(cachep))
				kernel_map_pages(virt_to_page(objp),
					cachep->size / PAGE_SIZE, 1);
			else
				check_poison_obj(cachep, objp);
#else
			check_poison_obj(cachep, objp);
#endif
		}
		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "start of a freed object "
					   "was overwritten");
			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "end of a freed object "
					   "was overwritten");
		}
	}
}
#else
static void slab_destroy_debugcheck(struct kmem_cache *cachep,
						struct page *page)
{
}
#endif

/**
 * slab_destroy - destroy and release all objects in a slab
 * @cachep: cache pointer being destroyed
 * @page: page pointer being destroyed
 *
 * Destroy all the objs in a slab page, and release the mem back to the system.
 * Before calling the slab page must have been unlinked from the cache. The
 * kmem_cache_node ->list_lock is not held/needed.
 */
static void slab_destroy(struct kmem_cache *cachep, struct page *page)
{
	void *freelist;

	freelist = page->freelist;
	slab_destroy_debugcheck(cachep, page);
	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
		struct rcu_head *head;

		/*
		 * RCU free overloads the RCU head over the LRU.
		 * slab_page has been overloaded over the LRU,
		 * however it is not used from now on so that
		 * we can use it safely.
		 */
		head = (void *)&page->rcu_head;
		call_rcu(head, kmem_rcu_free);

	} else {
		kmem_freepages(cachep, page);
	}

	/*
	 * From now on, we don't use freelist
	 * although actual page can be freed in rcu context
	 */
	if (OFF_SLAB(cachep))
		kmem_cache_free(cachep->freelist_cache, freelist);
}

static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
{
	struct page *page, *n;

	list_for_each_entry_safe(page, n, list, lru) {
		list_del(&page->lru);
		slab_destroy(cachep, page);
	}
}

/**
 * calculate_slab_order - calculate size (page order) of slabs
 * @cachep: pointer to the cache that is being created
 * @size: size of objects to be created in this cache.
 * @align: required alignment for the objects.
 * @flags: slab allocation flags
 *
 * Also calculates the number of objects per slab.
 *
 * This could be made much more intelligent.  For now, try to avoid using
 * high order pages for slabs.  When the gfp() functions are more friendly
 * towards high-order requests, this should be changed.
 */
static size_t calculate_slab_order(struct kmem_cache *cachep,
			size_t size, size_t align, unsigned long flags)
{
	unsigned long offslab_limit;
	size_t left_over = 0;
	int gfporder;

	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
		unsigned int num;
		size_t remainder;

		cache_estimate(gfporder, size, align, flags, &remainder, &num);
		if (!num)
			continue;

		/* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */
		if (num > SLAB_OBJ_MAX_NUM)
			break;

		if (flags & CFLGS_OFF_SLAB) {
			size_t freelist_size_per_obj = sizeof(freelist_idx_t);
			/*
			 * Max number of objs-per-slab for caches which
			 * use off-slab slabs. Needed to avoid a possible
			 * looping condition in cache_grow().
			 */
			if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
				freelist_size_per_obj += sizeof(char);
			offslab_limit = size;
			offslab_limit /= freelist_size_per_obj;

			if (num > offslab_limit)
				break;
		}

		/* Found something acceptable - save it away */
		cachep->num = num;
		cachep->gfporder = gfporder;
		left_over = remainder;

		/*
		 * A VFS-reclaimable slab tends to have most allocations
		 * as GFP_NOFS and we really don't want to have to be allocating
		 * higher-order pages when we are unable to shrink dcache.
		 */
		if (flags & SLAB_RECLAIM_ACCOUNT)
			break;

		/*
		 * Large number of objects is good, but very large slabs are
		 * currently bad for the gfp()s.
		 */
		if (gfporder >= slab_max_order)
			break;

		/*
		 * Acceptable internal fragmentation?
		 */
		if (left_over * 8 <= (PAGE_SIZE << gfporder))
			break;
	}
	return left_over;
}
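/*
 * Worked example of the fragmentation test above (illustrative only,
 * assuming 4K pages): left_over * 8 <= (PAGE_SIZE << gfporder) accepts
 * an order only when the wasted space is at most 1/8 of the slab, i.e.
 * at most 4096 / 8 = 512 bytes for an order-0 slab and 8192 / 8 = 1024
 * bytes for an order-1 slab; otherwise the loop tries the next higher
 * order.
 */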

static struct array_cache __percpu *alloc_kmem_cache_cpus(
		struct kmem_cache *cachep, int entries, int batchcount)
{
	int cpu;
	size_t size;
	struct array_cache __percpu *cpu_cache;

	size = sizeof(void *) * entries + sizeof(struct array_cache);
	cpu_cache = __alloc_percpu(size, sizeof(void *));

	if (!cpu_cache)
		return NULL;

	for_each_possible_cpu(cpu) {
		init_arraycache(per_cpu_ptr(cpu_cache, cpu),
				entries, batchcount);
	}

	return cpu_cache;
}

static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	if (slab_state >= FULL)
		return enable_cpucache(cachep, gfp);

	cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1);
	if (!cachep->cpu_cache)
		return 1;

	if (slab_state == DOWN) {
		/* Creation of first cache (kmem_cache). */
		set_up_node(kmem_cache, CACHE_CACHE);
	} else if (slab_state == PARTIAL) {
		/* For kmem_cache_node */
		set_up_node(cachep, SIZE_NODE);
	} else {
		int node;

		for_each_online_node(node) {
			cachep->node[node] = kmalloc_node(
				sizeof(struct kmem_cache_node), gfp, node);
			BUG_ON(!cachep->node[node]);
			kmem_cache_node_init(cachep->node[node]);
		}
	}

	cachep->node[numa_mem_id()]->next_reap =
			jiffies + REAPTIMEOUT_NODE +
			((unsigned long)cachep) % REAPTIMEOUT_NODE;

	cpu_cache_get(cachep)->avail = 0;
	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
	cpu_cache_get(cachep)->batchcount = 1;
	cpu_cache_get(cachep)->touched = 0;
	cachep->batchcount = 1;
	cachep->limit = BOOT_CPUCACHE_ENTRIES;
	return 0;
}

unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}

struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *cachep;

	cachep = find_mergeable(size, align, flags, name, ctor);
	if (cachep) {
		cachep->refcount++;

		/*
		 * Adjust the object sizes so that we clear
		 * the complete object on kzalloc.
		 */
		cachep->object_size = max_t(int, cachep->object_size, size);
	}
	return cachep;
}

/**
 * __kmem_cache_create - Create a cache.
 * @cachep: cache management descriptor
 * @flags: SLAB flags
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 */
int
__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
{
2111 2112
	size_t left_over, freelist_size;
	size_t ralign = BYTES_PER_WORD;
2113
	gfp_t gfp;
2114
	int err;
2115
	size_t size = cachep->size;
L
Linus Torvalds 已提交
2116 2117 2118 2119 2120 2121 2122 2123 2124

#if DEBUG
#if FORCED_DEBUG
	/*
	 * Enable redzoning and last user accounting, except for caches with
	 * large objects, if the increased size would increase the object size
	 * above the next power of two: caches with object sizes just above a
	 * power of two have a significant amount of internal fragmentation.
	 */
D
David Woodhouse 已提交
2125 2126
	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
						2 * sizeof(unsigned long long)))
P
Pekka Enberg 已提交
2127
		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
L
Linus Torvalds 已提交
2128 2129 2130 2131 2132 2133 2134
	if (!(flags & SLAB_DESTROY_BY_RCU))
		flags |= SLAB_POISON;
#endif
	if (flags & SLAB_DESTROY_BY_RCU)
		BUG_ON(flags & SLAB_POISON);
#endif

A
Andrew Morton 已提交
2135 2136
	/*
	 * Check that size is in terms of words.  This is needed to avoid
L
Linus Torvalds 已提交
2137 2138 2139
	 * unaligned accesses for some archs when redzoning is used, and makes
	 * sure any on-slab bufctl's are also correctly aligned.
	 */
P
Pekka Enberg 已提交
2140 2141 2142
	if (size & (BYTES_PER_WORD - 1)) {
		size += (BYTES_PER_WORD - 1);
		size &= ~(BYTES_PER_WORD - 1);
L
Linus Torvalds 已提交
2143 2144
	}

D
David Woodhouse 已提交
2145 2146 2147 2148 2149 2150 2151
	if (flags & SLAB_RED_ZONE) {
		ralign = REDZONE_ALIGN;
		/* If redzoning, ensure that the second redzone is suitably
		 * aligned, by adjusting the object size accordingly. */
		size += REDZONE_ALIGN - 1;
		size &= ~(REDZONE_ALIGN - 1);
	}
2152

2153
	/* 3) caller mandated alignment */
2154 2155
	if (ralign < cachep->align) {
		ralign = cachep->align;
L
Linus Torvalds 已提交
2156
	}
2157 2158
	/* disable debug if necessary */
	if (ralign > __alignof__(unsigned long long))
2159
		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
A
Andrew Morton 已提交
2160
	/*
2161
	 * 4) Store it.
L
Linus Torvalds 已提交
2162
	 */
2163
	cachep->align = ralign;
L
Linus Torvalds 已提交
2164

2165 2166 2167 2168 2169
	if (slab_is_available())
		gfp = GFP_KERNEL;
	else
		gfp = GFP_NOWAIT;

L
Linus Torvalds 已提交
2170 2171
#if DEBUG

2172 2173 2174 2175
	/*
	 * Both debugging options require word-alignment which is calculated
	 * into align above.
	 */
L
Linus Torvalds 已提交
2176 2177
	if (flags & SLAB_RED_ZONE) {
		/* add space for red zone words */
2178 2179
		cachep->obj_offset += sizeof(unsigned long long);
		size += 2 * sizeof(unsigned long long);
L
Linus Torvalds 已提交
2180 2181
	}
	if (flags & SLAB_STORE_USER) {
2182
		/* user store requires one word storage behind the end of
D
David Woodhouse 已提交
2183 2184
		 * the real object. But if the second red zone needs to be
		 * aligned to 64 bits, we must allow that much space.
L
Linus Torvalds 已提交
2185
		 */
D
David Woodhouse 已提交
2186 2187 2188 2189
		if (flags & SLAB_RED_ZONE)
			size += REDZONE_ALIGN;
		else
			size += BYTES_PER_WORD;
L
Linus Torvalds 已提交
2190 2191
	}
#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
2192
	if (size >= kmalloc_size(INDEX_NODE + 1)
2193 2194 2195
	    && cachep->object_size > cache_line_size()
	    && ALIGN(size, cachep->align) < PAGE_SIZE) {
		cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
L
Linus Torvalds 已提交
2196 2197 2198 2199 2200
		size = PAGE_SIZE;
	}
#endif
#endif

2201 2202 2203
	/*
	 * Determine if the slab management is 'on' or 'off' slab.
	 * (bootstrapping cannot cope with offslab caches so don't do
2204 2205
	 * it too early on. Always use on-slab management when
	 * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
2206
	 */
2207
	if ((size >= (PAGE_SIZE >> 5)) && !slab_early_init &&
2208
	    !(flags & SLAB_NOLEAKTRACE))
L
Linus Torvalds 已提交
2209 2210 2211 2212 2213 2214
		/*
		 * Size is large, assume best to place the slab management obj
		 * off-slab (should allow better packing of objs).
		 */
		flags |= CFLGS_OFF_SLAB;

2215
	size = ALIGN(size, cachep->align);
2216 2217 2218 2219 2220 2221
	/*
	 * We should restrict the number of objects in a slab to implement
	 * byte sized index. Refer comment on SLAB_OBJ_MIN_SIZE definition.
	 */
	if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
		size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
L
Linus Torvalds 已提交
2222

2223
	left_over = calculate_slab_order(cachep, size, cachep->align, flags);
L
Linus Torvalds 已提交
2224

2225
	if (!cachep->num)
2226
		return -E2BIG;
L
Linus Torvalds 已提交
2227

2228
	freelist_size = calculate_freelist_size(cachep->num, cachep->align);
L
Linus Torvalds 已提交
2229 2230 2231 2232 2233

	/*
	 * If the slab has been placed off-slab, and we have enough space then
	 * move it on-slab. This is at the expense of any extra colouring.
	 */
2234
	if (flags & CFLGS_OFF_SLAB && left_over >= freelist_size) {
L
Linus Torvalds 已提交
2235
		flags &= ~CFLGS_OFF_SLAB;
2236
		left_over -= freelist_size;
L
Linus Torvalds 已提交
2237 2238 2239 2240
	}

	if (flags & CFLGS_OFF_SLAB) {
		/* really off slab. No need for manual alignment */
2241
		freelist_size = calculate_freelist_size(cachep->num, 0);
2242 2243 2244 2245 2246 2247 2248 2249 2250

#ifdef CONFIG_PAGE_POISONING
		/* If we're going to use the generic kernel_map_pages()
		 * poisoning, then it's going to smash the contents of
		 * the redzone and userword anyhow, so switch them off.
		 */
		if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
			flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
#endif
L
Linus Torvalds 已提交
2251 2252 2253 2254
	}

	cachep->colour_off = cache_line_size();
	/* Offset must be a multiple of the alignment. */
2255 2256
	if (cachep->colour_off < cachep->align)
		cachep->colour_off = cachep->align;
P
Pekka Enberg 已提交
2257
	cachep->colour = left_over / cachep->colour_off;
2258
	cachep->freelist_size = freelist_size;
L
Linus Torvalds 已提交
2259
	cachep->flags = flags;
2260
	cachep->allocflags = __GFP_COMP;
2261
	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
2262
		cachep->allocflags |= GFP_DMA;
2263
	cachep->size = size;
2264
	cachep->reciprocal_buffer_size = reciprocal_value(size);
L
Linus Torvalds 已提交
2265

2266
	if (flags & CFLGS_OFF_SLAB) {
2267
		cachep->freelist_cache = kmalloc_slab(freelist_size, 0u);
2268
		/*
2269
		 * This is a possibility for one of the kmalloc_{dma,}_caches.
2270
		 * But since we go off slab only for object size greater than
2271 2272
		 * PAGE_SIZE/8, and kmalloc_{dma,}_caches get created
		 * in ascending order,this should not happen at all.
2273 2274
		 * But leave a BUG_ON for some lucky dude.
		 */
2275
		BUG_ON(ZERO_OR_NULL_PTR(cachep->freelist_cache));
2276
	}
L
Linus Torvalds 已提交
2277

2278 2279
	err = setup_cpu_cache(cachep, gfp);
	if (err) {
2280
		__kmem_cache_shutdown(cachep);
2281
		return err;
2282
	}
L
Linus Torvalds 已提交
2283

2284
	return 0;
L
Linus Torvalds 已提交
2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297
}

#if DEBUG
static void check_irq_off(void)
{
	BUG_ON(!irqs_disabled());
}

static void check_irq_on(void)
{
	BUG_ON(irqs_disabled());
}

2298
static void check_spinlock_acquired(struct kmem_cache *cachep)
L
Linus Torvalds 已提交
2299 2300 2301
{
#ifdef CONFIG_SMP
	check_irq_off();
2302
	assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
L
Linus Torvalds 已提交
2303 2304
#endif
}
2305

2306
static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2307 2308 2309
{
#ifdef CONFIG_SMP
	check_irq_off();
2310
	assert_spin_locked(&get_node(cachep, node)->list_lock);
2311 2312 2313
#endif
}

L
Linus Torvalds 已提交
2314 2315 2316 2317
#else
#define check_irq_off()	do { } while(0)
#define check_irq_on()	do { } while(0)
#define check_spinlock_acquired(x) do { } while(0)
2318
#define check_spinlock_acquired_node(x, y) do { } while(0)
L
Linus Torvalds 已提交
2319 2320
#endif

2321
static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
2322 2323 2324
			struct array_cache *ac,
			int force, int node);

L
Linus Torvalds 已提交
2325 2326
static void do_drain(void *arg)
{
A
Andrew Morton 已提交
2327
	struct kmem_cache *cachep = arg;
L
Linus Torvalds 已提交
2328
	struct array_cache *ac;
2329
	int node = numa_mem_id();
2330
	struct kmem_cache_node *n;
2331
	LIST_HEAD(list);
L
Linus Torvalds 已提交
2332 2333

	check_irq_off();
2334
	ac = cpu_cache_get(cachep);
2335 2336
	n = get_node(cachep, node);
	spin_lock(&n->list_lock);
2337
	free_block(cachep, ac->entry, ac->avail, node, &list);
2338
	spin_unlock(&n->list_lock);
2339
	slabs_destroy(cachep, &list);
L
Linus Torvalds 已提交
2340 2341 2342
	ac->avail = 0;
}

2343
static void drain_cpu_caches(struct kmem_cache *cachep)
L
Linus Torvalds 已提交
2344
{
2345
	struct kmem_cache_node *n;
2346 2347
	int node;

2348
	on_each_cpu(do_drain, cachep, 1);
L
Linus Torvalds 已提交
2349
	check_irq_on();
2350 2351
	for_each_kmem_cache_node(cachep, node, n)
		if (n->alien)
2352
			drain_alien_cache(cachep, n->alien);
2353

2354 2355
	for_each_kmem_cache_node(cachep, node, n)
		drain_array(cachep, n, n->shared, 1, node);
L
Linus Torvalds 已提交
2356 2357
}

2358 2359 2360 2361 2362 2363 2364
/*
 * Remove slabs from the list of free slabs.
 * Specify the number of slabs to drain in tofree.
 *
 * Returns the actual number of slabs released.
 */
static int drain_freelist(struct kmem_cache *cache,
2365
			struct kmem_cache_node *n, int tofree)
L
Linus Torvalds 已提交
2366
{
2367 2368
	struct list_head *p;
	int nr_freed;
2369
	struct page *page;
L
Linus Torvalds 已提交
2370

2371
	nr_freed = 0;
2372
	while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
L
Linus Torvalds 已提交
2373

2374 2375 2376 2377
		spin_lock_irq(&n->list_lock);
		p = n->slabs_free.prev;
		if (p == &n->slabs_free) {
			spin_unlock_irq(&n->list_lock);
2378 2379
			goto out;
		}
L
Linus Torvalds 已提交
2380

2381
		page = list_entry(p, struct page, lru);
L
Linus Torvalds 已提交
2382
#if DEBUG
2383
		BUG_ON(page->active);
L
Linus Torvalds 已提交
2384
#endif
2385
		list_del(&page->lru);
2386 2387 2388 2389
		/*
		 * Safe to drop the lock. The slab is no longer linked
		 * to the cache.
		 */
2390 2391
		n->free_objects -= cache->num;
		spin_unlock_irq(&n->list_lock);
2392
		slab_destroy(cache, page);
2393
		nr_freed++;
L
Linus Torvalds 已提交
2394
	}
2395 2396
out:
	return nr_freed;
L
Linus Torvalds 已提交
2397 2398
}

2399
int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
2400
{
2401 2402
	int ret = 0;
	int node;
2403
	struct kmem_cache_node *n;
2404 2405 2406 2407

	drain_cpu_caches(cachep);

	check_irq_on();
2408
	for_each_kmem_cache_node(cachep, node, n) {
2409
		drain_freelist(cachep, n, slabs_tofree(cachep, n));
2410

2411 2412
		ret += !list_empty(&n->slabs_full) ||
			!list_empty(&n->slabs_partial);
2413 2414 2415 2416
	}
	return (ret ? 1 : 0);
}

2417
int __kmem_cache_shutdown(struct kmem_cache *cachep)
L
Linus Torvalds 已提交
2418
{
2419
	int i;
2420
	struct kmem_cache_node *n;
2421
	int rc = __kmem_cache_shrink(cachep, false);
L
Linus Torvalds 已提交
2422

2423 2424
	if (rc)
		return rc;
L
Linus Torvalds 已提交
2425

2426
	free_percpu(cachep->cpu_cache);
L
Linus Torvalds 已提交
2427

2428
	/* NUMA: free the node structures */
2429 2430 2431 2432 2433
	for_each_kmem_cache_node(cachep, i, n) {
		kfree(n->shared);
		free_alien_cache(n->alien);
		kfree(n);
		cachep->node[i] = NULL;
2434 2435
	}
	return 0;
L
Linus Torvalds 已提交
2436 2437
}

/*
 * Get the memory for a slab management obj.
 *
 * For a slab cache when the slab descriptor is off-slab, the
 * slab descriptor can't come from the same cache which is being created,
 * because if it is the case, that means we defer the creation of
 * the kmalloc_{dma,}_cache of size sizeof(slab descriptor) to this point.
 * And we eventually call down to __kmem_cache_create(), which
 * in turn looks up in the kmalloc_{dma,}_caches for the desired-size one.
 * This is a "chicken-and-egg" problem.
 *
 * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
 * which are all initialized during kmem_cache_init().
 */
static void *alloc_slabmgmt(struct kmem_cache *cachep,
2453 2454
				   struct page *page, int colour_off,
				   gfp_t local_flags, int nodeid)
L
Linus Torvalds 已提交
2455
{
2456
	void *freelist;
2457
	void *addr = page_address(page);
P
Pekka Enberg 已提交
2458

L
Linus Torvalds 已提交
2459 2460
	if (OFF_SLAB(cachep)) {
		/* Slab management obj is off-slab. */
2461
		freelist = kmem_cache_alloc_node(cachep->freelist_cache,
2462
					      local_flags, nodeid);
2463
		if (!freelist)
L
Linus Torvalds 已提交
2464 2465
			return NULL;
	} else {
2466 2467
		freelist = addr + colour_off;
		colour_off += cachep->freelist_size;
L
Linus Torvalds 已提交
2468
	}
2469 2470 2471
	page->active = 0;
	page->s_mem = addr + colour_off;
	return freelist;
L
Linus Torvalds 已提交
2472 2473
}

2474
static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
L
Linus Torvalds 已提交
2475
{
2476
	return ((freelist_idx_t *)page->freelist)[idx];
2477 2478 2479
}

static inline void set_free_obj(struct page *page,
2480
					unsigned int idx, freelist_idx_t val)
2481
{
2482
	((freelist_idx_t *)(page->freelist))[idx] = val;
L
Linus Torvalds 已提交
2483 2484
}

2485
static void cache_init_objs(struct kmem_cache *cachep,
2486
			    struct page *page)
L
Linus Torvalds 已提交
2487 2488 2489 2490
{
	int i;

	for (i = 0; i < cachep->num; i++) {
2491
		void *objp = index_to_obj(cachep, page, i);
L
Linus Torvalds 已提交
2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503
#if DEBUG
		/* need to poison the objs? */
		if (cachep->flags & SLAB_POISON)
			poison_obj(cachep, objp, POISON_FREE);
		if (cachep->flags & SLAB_STORE_USER)
			*dbg_userword(cachep, objp) = NULL;

		if (cachep->flags & SLAB_RED_ZONE) {
			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
		}
		/*
A
Andrew Morton 已提交
2504 2505 2506
		 * Constructors are not allowed to allocate memory from the same
		 * cache which they are a constructor for.  Otherwise, deadlock.
		 * They must also be threaded.
L
Linus Torvalds 已提交
2507 2508
		 */
		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
2509
			cachep->ctor(objp + obj_offset(cachep));
L
Linus Torvalds 已提交
2510 2511 2512 2513

		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "constructor overwrote the"
P
Pekka Enberg 已提交
2514
					   " end of an object");
L
Linus Torvalds 已提交
2515 2516
			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "constructor overwrote the"
P
Pekka Enberg 已提交
2517
					   " start of an object");
L
Linus Torvalds 已提交
2518
		}
2519
		if ((cachep->size % PAGE_SIZE) == 0 &&
A
Andrew Morton 已提交
2520
			    OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
P
Pekka Enberg 已提交
2521
			kernel_map_pages(virt_to_page(objp),
2522
					 cachep->size / PAGE_SIZE, 0);
L
Linus Torvalds 已提交
2523 2524
#else
		if (cachep->ctor)
2525
			cachep->ctor(objp);
L
Linus Torvalds 已提交
2526
#endif
2527
		set_obj_status(page, i, OBJECT_FREE);
2528
		set_free_obj(page, i, i);
L
Linus Torvalds 已提交
2529 2530 2531
	}
}

2532
static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
L
Linus Torvalds 已提交
2533
{
2534 2535
	if (CONFIG_ZONE_DMA_FLAG) {
		if (flags & GFP_DMA)
2536
			BUG_ON(!(cachep->allocflags & GFP_DMA));
2537
		else
2538
			BUG_ON(cachep->allocflags & GFP_DMA);
2539
	}
L
Linus Torvalds 已提交
2540 2541
}

2542
static void *slab_get_obj(struct kmem_cache *cachep, struct page *page,
A
Andrew Morton 已提交
2543
				int nodeid)
2544
{
2545
	void *objp;
2546

2547
	objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
2548
	page->active++;
2549
#if DEBUG
J
Joonsoo Kim 已提交
2550
	WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
2551 2552 2553 2554 2555
#endif

	return objp;
}

2556
static void slab_put_obj(struct kmem_cache *cachep, struct page *page,
A
Andrew Morton 已提交
2557
				void *objp, int nodeid)
2558
{
2559
	unsigned int objnr = obj_to_index(cachep, page, objp);
2560
#if DEBUG
J
Joonsoo Kim 已提交
2561
	unsigned int i;
2562

2563
	/* Verify that the slab belongs to the intended node */
J
Joonsoo Kim 已提交
2564
	WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
2565

2566
	/* Verify double free bug */
2567
	for (i = page->active; i < cachep->num; i++) {
2568
		if (get_free_obj(page, i) == objnr) {
2569 2570 2571 2572
			printk(KERN_ERR "slab: double free detected in cache "
					"'%s', objp %p\n", cachep->name, objp);
			BUG();
		}
2573 2574
	}
#endif
2575
	page->active--;
2576
	set_free_obj(page, page->active, objnr);
2577 2578
}

2579 2580 2581
/*
 * Map pages beginning at addr to the given cache and slab. This is required
 * for the slab allocator to be able to lookup the cache and slab of a
2582
 * virtual address for kfree, ksize, and slab debugging.
2583
 */
2584
static void slab_map_pages(struct kmem_cache *cache, struct page *page,
2585
			   void *freelist)
L
Linus Torvalds 已提交
2586
{
2587
	page->slab_cache = cache;
2588
	page->freelist = freelist;
L
Linus Torvalds 已提交
2589 2590 2591 2592 2593 2594
}

/*
 * Grow (by 1) the number of slabs within a cache.  This is called by
 * kmem_cache_alloc() when there are no active objs left in a cache.
 */
2595
static int cache_grow(struct kmem_cache *cachep,
2596
		gfp_t flags, int nodeid, struct page *page)
L
Linus Torvalds 已提交
2597
{
2598
	void *freelist;
P
Pekka Enberg 已提交
2599 2600
	size_t offset;
	gfp_t local_flags;
2601
	struct kmem_cache_node *n;
L
Linus Torvalds 已提交
2602

A
Andrew Morton 已提交
2603 2604 2605
	/*
	 * Be lazy and only check for valid flags here,  keeping it out of the
	 * critical path in kmem_cache_alloc().
L
Linus Torvalds 已提交
2606
	 */
2607 2608 2609 2610
	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
		pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
		BUG();
	}
C
Christoph Lameter 已提交
2611
	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
L
Linus Torvalds 已提交
2612

2613
	/* Take the node list lock to change the colour_next on this node */
L
Linus Torvalds 已提交
2614
	check_irq_off();
2615
	n = get_node(cachep, nodeid);
2616
	spin_lock(&n->list_lock);
L
Linus Torvalds 已提交
2617 2618

	/* Get colour for the slab, and cal the next value. */
2619 2620 2621 2622 2623
	offset = n->colour_next;
	n->colour_next++;
	if (n->colour_next >= cachep->colour)
		n->colour_next = 0;
	spin_unlock(&n->list_lock);
L
Linus Torvalds 已提交
2624

2625
	offset *= cachep->colour_off;
L
Linus Torvalds 已提交
2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637

	if (local_flags & __GFP_WAIT)
		local_irq_enable();

	/*
	 * The test for missing atomic flag is performed here, rather than
	 * the more obvious place, simply to reduce the critical path length
	 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
	 * will eventually be caught here (where it matters).
	 */
	kmem_flagcheck(cachep, flags);

A
Andrew Morton 已提交
2638 2639 2640
	/*
	 * Get mem for the objs.  Attempt to allocate a physical page from
	 * 'nodeid'.
2641
	 */
2642 2643 2644
	if (!page)
		page = kmem_getpages(cachep, local_flags, nodeid);
	if (!page)
L
Linus Torvalds 已提交
2645 2646 2647
		goto failed;

	/* Get slab management. */
2648
	freelist = alloc_slabmgmt(cachep, page, offset,
C
Christoph Lameter 已提交
2649
			local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
2650
	if (!freelist)
L
Linus Torvalds 已提交
2651 2652
		goto opps1;

2653
	slab_map_pages(cachep, page, freelist);
L
Linus Torvalds 已提交
2654

2655
	cache_init_objs(cachep, page);
L
Linus Torvalds 已提交
2656 2657 2658 2659

	if (local_flags & __GFP_WAIT)
		local_irq_disable();
	check_irq_off();
2660
	spin_lock(&n->list_lock);
L
Linus Torvalds 已提交
2661 2662

	/* Make slab active. */
2663
	list_add_tail(&page->lru, &(n->slabs_free));
L
Linus Torvalds 已提交
2664
	STATS_INC_GROWN(cachep);
2665 2666
	n->free_objects += cachep->num;
	spin_unlock(&n->list_lock);
L
Linus Torvalds 已提交
2667
	return 1;
A
Andrew Morton 已提交
2668
opps1:
2669
	kmem_freepages(cachep, page);
A
Andrew Morton 已提交
2670
failed:
L
Linus Torvalds 已提交
2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686
	if (local_flags & __GFP_WAIT)
		local_irq_disable();
	return 0;
}

#if DEBUG

/*
 * Perform extra freeing checks:
 * - detect bad pointers.
 * - POISON/RED_ZONE checking
 */
static void kfree_debugcheck(const void *objp)
{
	if (!virt_addr_valid(objp)) {
		printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
P
Pekka Enberg 已提交
2687 2688
		       (unsigned long)objp);
		BUG();
L
Linus Torvalds 已提交
2689 2690 2691
	}
}

2692 2693
static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
{
2694
	unsigned long long redzone1, redzone2;
2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709

	redzone1 = *dbg_redzone1(cache, obj);
	redzone2 = *dbg_redzone2(cache, obj);

	/*
	 * Redzone is ok.
	 */
	if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
		return;

	if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
		slab_error(cache, "double free detected");
	else
		slab_error(cache, "memory outside object was overwritten");

2710
	printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
2711 2712 2713
			obj, redzone1, redzone2);
}

2714
static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2715
				   unsigned long caller)
L
Linus Torvalds 已提交
2716 2717
{
	unsigned int objnr;
2718
	struct page *page;
L
Linus Torvalds 已提交
2719

2720 2721
	BUG_ON(virt_to_cache(objp) != cachep);

2722
	objp -= obj_offset(cachep);
L
Linus Torvalds 已提交
2723
	kfree_debugcheck(objp);
2724
	page = virt_to_head_page(objp);
L
Linus Torvalds 已提交
2725 2726

	if (cachep->flags & SLAB_RED_ZONE) {
2727
		verify_redzone_free(cachep, objp);
L
Linus Torvalds 已提交
2728 2729 2730 2731
		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
	}
	if (cachep->flags & SLAB_STORE_USER)
2732
		*dbg_userword(cachep, objp) = (void *)caller;
L
Linus Torvalds 已提交
2733

2734
	objnr = obj_to_index(cachep, page, objp);
L
Linus Torvalds 已提交
2735 2736

	BUG_ON(objnr >= cachep->num);
2737
	BUG_ON(objp != index_to_obj(cachep, page, objnr));
L
Linus Torvalds 已提交
2738

2739
	set_obj_status(page, objnr, OBJECT_FREE);
L
Linus Torvalds 已提交
2740 2741
	if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
2742
		if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
2743
			store_stackinfo(cachep, objp, caller);
P
Pekka Enberg 已提交
2744
			kernel_map_pages(virt_to_page(objp),
2745
					 cachep->size / PAGE_SIZE, 0);
L
Linus Torvalds 已提交
2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760
		} else {
			poison_obj(cachep, objp, POISON_FREE);
		}
#else
		poison_obj(cachep, objp, POISON_FREE);
#endif
	}
	return objp;
}

#else
#define kfree_debugcheck(x) do { } while(0)
#define cache_free_debugcheck(x,objp,z) (objp)
#endif

2761 2762
static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
							bool force_refill)
L
Linus Torvalds 已提交
2763 2764
{
	int batchcount;
2765
	struct kmem_cache_node *n;
L
Linus Torvalds 已提交
2766
	struct array_cache *ac;
P
Pekka Enberg 已提交
2767 2768
	int node;

L
Linus Torvalds 已提交
2769
	check_irq_off();
2770
	node = numa_mem_id();
2771 2772 2773
	if (unlikely(force_refill))
		goto force_grow;
retry:
2774
	ac = cpu_cache_get(cachep);
L
Linus Torvalds 已提交
2775 2776
	batchcount = ac->batchcount;
	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
A
Andrew Morton 已提交
2777 2778 2779 2780
		/*
		 * If there was little recent activity on this cache, then
		 * perform only a partial refill.  Otherwise we could generate
		 * refill bouncing.
L
Linus Torvalds 已提交
2781 2782 2783
		 */
		batchcount = BATCHREFILL_LIMIT;
	}
2784
	n = get_node(cachep, node);
2785

2786 2787
	BUG_ON(ac->avail > 0 || !n);
	spin_lock(&n->list_lock);
L
Linus Torvalds 已提交
2788

2789
	/* See if we can refill from the shared array */
2790 2791
	if (n->shared && transfer_objects(ac, n->shared, batchcount)) {
		n->shared->touched = 1;
2792
		goto alloc_done;
2793
	}
2794

L
Linus Torvalds 已提交
2795 2796
	while (batchcount > 0) {
		struct list_head *entry;
2797
		struct page *page;
L
Linus Torvalds 已提交
2798
		/* Get the slab from which to allocate. */
2799 2800 2801 2802 2803
		entry = n->slabs_partial.next;
		if (entry == &n->slabs_partial) {
			n->free_touched = 1;
			entry = n->slabs_free.next;
			if (entry == &n->slabs_free)
L
Linus Torvalds 已提交
2804 2805 2806
				goto must_grow;
		}

2807
		page = list_entry(entry, struct page, lru);
L
Linus Torvalds 已提交
2808
		check_spinlock_acquired(cachep);
2809 2810 2811 2812 2813 2814

		/*
		 * The slab was either on partial or free list so
		 * there must be at least one object available for
		 * allocation.
		 */
2815
		BUG_ON(page->active >= cachep->num);
2816

2817
		while (page->active < cachep->num && batchcount--) {
L
Linus Torvalds 已提交
2818 2819 2820 2821
			STATS_INC_ALLOCED(cachep);
			STATS_INC_ACTIVE(cachep);
			STATS_SET_HIGH(cachep);

2822
			ac_put_obj(cachep, ac, slab_get_obj(cachep, page,
2823
									node));
L
Linus Torvalds 已提交
2824 2825 2826
		}

		/* move slabp to correct slabp list: */
2827 2828
		list_del(&page->lru);
		if (page->active == cachep->num)
2829
			list_add(&page->lru, &n->slabs_full);
L
Linus Torvalds 已提交
2830
		else
2831
			list_add(&page->lru, &n->slabs_partial);
L
Linus Torvalds 已提交
2832 2833
	}

A
Andrew Morton 已提交
2834
must_grow:
2835
	n->free_objects -= ac->avail;
A
Andrew Morton 已提交
2836
alloc_done:
2837
	spin_unlock(&n->list_lock);
L
Linus Torvalds 已提交
2838 2839 2840

	if (unlikely(!ac->avail)) {
		int x;
2841
force_grow:
D
David Rientjes 已提交
2842
		x = cache_grow(cachep, gfp_exact_node(flags), node, NULL);
2843

A
Andrew Morton 已提交
2844
		/* cache_grow can reenable interrupts, then ac could change. */
2845
		ac = cpu_cache_get(cachep);
2846
		node = numa_mem_id();
2847 2848 2849

		/* no objects in sight? abort */
		if (!x && (ac->avail == 0 || force_refill))
L
Linus Torvalds 已提交
2850 2851
			return NULL;

A
Andrew Morton 已提交
2852
		if (!ac->avail)		/* objects refilled by interrupt? */
L
Linus Torvalds 已提交
2853 2854 2855
			goto retry;
	}
	ac->touched = 1;
2856 2857

	return ac_get_obj(cachep, ac, flags, force_refill);
L
Linus Torvalds 已提交
2858 2859
}

A
Andrew Morton 已提交
2860 2861
static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
						gfp_t flags)
L
Linus Torvalds 已提交
2862 2863 2864 2865 2866 2867 2868 2869
{
	might_sleep_if(flags & __GFP_WAIT);
#if DEBUG
	kmem_flagcheck(cachep, flags);
#endif
}

#if DEBUG
A
Andrew Morton 已提交
2870
static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
2871
				gfp_t flags, void *objp, unsigned long caller)
L
Linus Torvalds 已提交
2872
{
2873 2874
	struct page *page;

P
Pekka Enberg 已提交
2875
	if (!objp)
L
Linus Torvalds 已提交
2876
		return objp;
P
Pekka Enberg 已提交
2877
	if (cachep->flags & SLAB_POISON) {
L
Linus Torvalds 已提交
2878
#ifdef CONFIG_DEBUG_PAGEALLOC
2879
		if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
P
Pekka Enberg 已提交
2880
			kernel_map_pages(virt_to_page(objp),
2881
					 cachep->size / PAGE_SIZE, 1);
L
Linus Torvalds 已提交
2882 2883 2884 2885 2886 2887 2888 2889
		else
			check_poison_obj(cachep, objp);
#else
		check_poison_obj(cachep, objp);
#endif
		poison_obj(cachep, objp, POISON_INUSE);
	}
	if (cachep->flags & SLAB_STORE_USER)
2890
		*dbg_userword(cachep, objp) = (void *)caller;
L
Linus Torvalds 已提交
2891 2892

	if (cachep->flags & SLAB_RED_ZONE) {
A
Andrew Morton 已提交
2893 2894 2895 2896
		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
			slab_error(cachep, "double free, or memory outside"
						" object was overwritten");
P
Pekka Enberg 已提交
2897
			printk(KERN_ERR
2898
				"%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
A
Andrew Morton 已提交
2899 2900
				objp, *dbg_redzone1(cachep, objp),
				*dbg_redzone2(cachep, objp));
L
Linus Torvalds 已提交
2901 2902 2903 2904
		}
		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
	}
2905 2906 2907

	page = virt_to_head_page(objp);
	set_obj_status(page, obj_to_index(cachep, page, objp), OBJECT_ACTIVE);
2908
	objp += obj_offset(cachep);
2909
	if (cachep->ctor && cachep->flags & SLAB_POISON)
2910
		cachep->ctor(objp);
T
Tetsuo Handa 已提交
2911 2912
	if (ARCH_SLAB_MINALIGN &&
	    ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
2913
		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
H
Hugh Dickins 已提交
2914
		       objp, (int)ARCH_SLAB_MINALIGN);
2915
	}
L
Linus Torvalds 已提交
2916 2917 2918 2919 2920 2921
	return objp;
}
#else
#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
#endif

A
Akinobu Mita 已提交
2922
static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
2923
{
2924
	if (unlikely(cachep == kmem_cache))
A
Akinobu Mita 已提交
2925
		return false;
2926

2927
	return should_failslab(cachep->object_size, flags, cachep->flags);
2928 2929
}

2930
static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
L
Linus Torvalds 已提交
2931
{
P
Pekka Enberg 已提交
2932
	void *objp;
L
Linus Torvalds 已提交
2933
	struct array_cache *ac;
2934
	bool force_refill = false;
L
Linus Torvalds 已提交
2935

2936
	check_irq_off();
2937

2938
	ac = cpu_cache_get(cachep);
L
Linus Torvalds 已提交
2939 2940
	if (likely(ac->avail)) {
		ac->touched = 1;
2941 2942
		objp = ac_get_obj(cachep, ac, flags, false);

2943
		/*
2944 2945
		 * Allow for the possibility all avail objects are not allowed
		 * by the current flags
2946
		 */
2947 2948 2949 2950 2951
		if (objp) {
			STATS_INC_ALLOCHIT(cachep);
			goto out;
		}
		force_refill = true;
L
Linus Torvalds 已提交
2952
	}
2953 2954 2955 2956 2957 2958 2959 2960 2961 2962

	STATS_INC_ALLOCMISS(cachep);
	objp = cache_alloc_refill(cachep, flags, force_refill);
	/*
	 * the 'ac' may be updated by cache_alloc_refill(),
	 * and kmemleak_erase() requires its correct value.
	 */
	ac = cpu_cache_get(cachep);

out:
2963 2964 2965 2966 2967
	/*
	 * To avoid a false negative, if an object that is in one of the
	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
	 * treat the array pointers as a reference to the object.
	 */
2968 2969
	if (objp)
		kmemleak_erase(&ac->entry[ac->avail]);
2970 2971 2972
	return objp;
}

2973
#ifdef CONFIG_NUMA
2974
/*
2975
 * Try allocating on another node if PFA_SPREAD_SLAB or a mempolicy is set.
2976 2977 2978 2979 2980 2981 2982 2983
 *
 * If we are in_interrupt, then process context, including cpusets and
 * mempolicy, may not apply and should not be used for allocation policy.
 */
static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	int nid_alloc, nid_here;

2984
	if (in_interrupt() || (flags & __GFP_THISNODE))
2985
		return NULL;
2986
	nid_alloc = nid_here = numa_mem_id();
2987
	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
2988
		nid_alloc = cpuset_slab_spread_node();
2989
	else if (current->mempolicy)
2990
		nid_alloc = mempolicy_slab_node();
2991
	if (nid_alloc != nid_here)
2992
		return ____cache_alloc_node(cachep, flags, nid_alloc);
2993 2994 2995
	return NULL;
}

2996 2997
/*
 * Fallback function if there was no memory available and no objects on a
2998
 * certain node and fall back is permitted. First we scan all the
2999
 * available nodes for available objects. If that fails then we
3000 3001 3002
 * perform an allocation without specifying a node. This allows the page
 * allocator to do its reclaim / fallback magic. We then insert the
 * slab into the proper nodelist and then allocate from it.
3003
 */
3004
static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3005
{
3006 3007
	struct zonelist *zonelist;
	gfp_t local_flags;
3008
	struct zoneref *z;
3009 3010
	struct zone *zone;
	enum zone_type high_zoneidx = gfp_zone(flags);
3011
	void *obj = NULL;
3012
	int nid;
3013
	unsigned int cpuset_mems_cookie;
3014 3015 3016 3017

	if (flags & __GFP_THISNODE)
		return NULL;

C
Christoph Lameter 已提交
3018
	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
3019

3020
retry_cpuset:
3021
	cpuset_mems_cookie = read_mems_allowed_begin();
3022
	zonelist = node_zonelist(mempolicy_slab_node(), flags);
3023

3024 3025 3026 3027 3028
retry:
	/*
	 * Look through allowed nodes for objects available
	 * from existing per node queues.
	 */
3029 3030
	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
		nid = zone_to_nid(zone);
3031

3032
		if (cpuset_zone_allowed(zone, flags) &&
3033 3034
			get_node(cache, nid) &&
			get_node(cache, nid)->free_objects) {
3035
				obj = ____cache_alloc_node(cache,
D
David Rientjes 已提交
3036
					gfp_exact_node(flags), nid);
3037 3038 3039
				if (obj)
					break;
		}
3040 3041
	}

3042
	if (!obj) {
3043 3044 3045 3046 3047 3048
		/*
		 * This allocation will be performed within the constraints
		 * of the current cpuset / memory policy requirements.
		 * We may trigger various forms of reclaim on the allowed
		 * set and go into memory reserves if necessary.
		 */
3049 3050
		struct page *page;

3051 3052 3053
		if (local_flags & __GFP_WAIT)
			local_irq_enable();
		kmem_flagcheck(cache, flags);
3054
		page = kmem_getpages(cache, local_flags, numa_mem_id());
3055 3056
		if (local_flags & __GFP_WAIT)
			local_irq_disable();
3057
		if (page) {
3058 3059 3060
			/*
			 * Insert into the appropriate per node queues
			 */
3061 3062
			nid = page_to_nid(page);
			if (cache_grow(cache, flags, nid, page)) {
3063
				obj = ____cache_alloc_node(cache,
D
David Rientjes 已提交
3064
					gfp_exact_node(flags), nid);
3065 3066 3067 3068 3069 3070 3071 3072
				if (!obj)
					/*
					 * Another processor may allocate the
					 * objects in the slab since we are
					 * not holding any locks.
					 */
					goto retry;
			} else {
3073
				/* cache_grow already freed obj */
3074 3075 3076
				obj = NULL;
			}
		}
3077
	}
3078

3079
	if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
3080
		goto retry_cpuset;
3081 3082 3083
	return obj;
}

3084 3085
/*
 * An interface to enable slab creation on nodeid
L
Linus Torvalds 已提交
3086
 */
3087
static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
A
Andrew Morton 已提交
3088
				int nodeid)
3089 3090
{
	struct list_head *entry;
3091
	struct page *page;
3092
	struct kmem_cache_node *n;
P
Pekka Enberg 已提交
3093 3094 3095
	void *obj;
	int x;

3096
	VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
3097
	n = get_node(cachep, nodeid);
3098
	BUG_ON(!n);
P
Pekka Enberg 已提交
3099

A
Andrew Morton 已提交
3100
retry:
3101
	check_irq_off();
3102 3103 3104 3105 3106 3107
	spin_lock(&n->list_lock);
	entry = n->slabs_partial.next;
	if (entry == &n->slabs_partial) {
		n->free_touched = 1;
		entry = n->slabs_free.next;
		if (entry == &n->slabs_free)
P
Pekka Enberg 已提交
3108 3109 3110
			goto must_grow;
	}

3111
	page = list_entry(entry, struct page, lru);
P
Pekka Enberg 已提交
3112 3113 3114 3115 3116 3117
	check_spinlock_acquired_node(cachep, nodeid);

	STATS_INC_NODEALLOCS(cachep);
	STATS_INC_ACTIVE(cachep);
	STATS_SET_HIGH(cachep);

3118
	BUG_ON(page->active == cachep->num);
P
Pekka Enberg 已提交
3119

3120
	obj = slab_get_obj(cachep, page, nodeid);
3121
	n->free_objects--;
P
Pekka Enberg 已提交
3122
	/* move slabp to correct slabp list: */
3123
	list_del(&page->lru);
P
Pekka Enberg 已提交
3124

3125 3126
	if (page->active == cachep->num)
		list_add(&page->lru, &n->slabs_full);
A
Andrew Morton 已提交
3127
	else
3128
		list_add(&page->lru, &n->slabs_partial);
3129

3130
	spin_unlock(&n->list_lock);
P
Pekka Enberg 已提交
3131
	goto done;
3132

A
Andrew Morton 已提交
3133
must_grow:
3134
	spin_unlock(&n->list_lock);
D
David Rientjes 已提交
3135
	x = cache_grow(cachep, gfp_exact_node(flags), nodeid, NULL);
3136 3137
	if (x)
		goto retry;
L
Linus Torvalds 已提交
3138

3139
	return fallback_alloc(cachep, flags);
3140

A
Andrew Morton 已提交
3141
done:
P
Pekka Enberg 已提交
3142
	return obj;
3143
}
3144 3145

static __always_inline void *
3146
slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3147
		   unsigned long caller)
3148 3149 3150
{
	unsigned long save_flags;
	void *ptr;
3151
	int slab_node = numa_mem_id();
3152

3153
	flags &= gfp_allowed_mask;
3154

3155 3156
	lockdep_trace_alloc(flags);

A
Akinobu Mita 已提交
3157
	if (slab_should_failslab(cachep, flags))
3158 3159
		return NULL;

3160 3161
	cachep = memcg_kmem_get_cache(cachep, flags);

3162 3163 3164
	cache_alloc_debugcheck_before(cachep, flags);
	local_irq_save(save_flags);

A
Andrew Morton 已提交
3165
	if (nodeid == NUMA_NO_NODE)
3166
		nodeid = slab_node;
3167

3168
	if (unlikely(!get_node(cachep, nodeid))) {
3169 3170 3171 3172 3173
		/* Node not bootstrapped yet */
		ptr = fallback_alloc(cachep, flags);
		goto out;
	}

3174
	if (nodeid == slab_node) {
3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189
		/*
		 * Use the locally cached objects if possible.
		 * However ____cache_alloc does not allow fallback
		 * to other nodes. It may fail while we still have
		 * objects on other nodes available.
		 */
		ptr = ____cache_alloc(cachep, flags);
		if (ptr)
			goto out;
	}
	/* ___cache_alloc_node can fall back to other nodes */
	ptr = ____cache_alloc_node(cachep, flags, nodeid);
  out:
	local_irq_restore(save_flags);
	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3190
	kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags,
3191
				 flags);
3192

3193
	if (likely(ptr)) {
3194
		kmemcheck_slab_alloc(cachep, flags, ptr, cachep->object_size);
3195 3196 3197
		if (unlikely(flags & __GFP_ZERO))
			memset(ptr, 0, cachep->object_size);
	}
3198

3199
	memcg_kmem_put_cache(cachep);
3200 3201 3202 3203 3204 3205 3206 3207
	return ptr;
}

static __always_inline void *
__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
{
	void *objp;

3208
	if (current->mempolicy || cpuset_do_slab_mem_spread()) {
3209 3210 3211 3212 3213 3214 3215 3216 3217 3218
		objp = alternate_node_alloc(cache, flags);
		if (objp)
			goto out;
	}
	objp = ____cache_alloc(cache, flags);

	/*
	 * We may just have run out of memory on the local node.
	 * ____cache_alloc_node() knows how to locate memory on other nodes
	 */
3219 3220
	if (!objp)
		objp = ____cache_alloc_node(cache, flags, numa_mem_id());
3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235

  out:
	return objp;
}
#else

static __always_inline void *
__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	return ____cache_alloc(cachep, flags);
}

#endif /* CONFIG_NUMA */

static __always_inline void *
3236
slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
3237 3238 3239 3240
{
	unsigned long save_flags;
	void *objp;

3241
	flags &= gfp_allowed_mask;
3242

3243 3244
	lockdep_trace_alloc(flags);

A
Akinobu Mita 已提交
3245
	if (slab_should_failslab(cachep, flags))
3246 3247
		return NULL;

3248 3249
	cachep = memcg_kmem_get_cache(cachep, flags);

3250 3251 3252 3253 3254
	cache_alloc_debugcheck_before(cachep, flags);
	local_irq_save(save_flags);
	objp = __do_cache_alloc(cachep, flags);
	local_irq_restore(save_flags);
	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3255
	kmemleak_alloc_recursive(objp, cachep->object_size, 1, cachep->flags,
3256
				 flags);
3257 3258
	prefetchw(objp);

3259
	if (likely(objp)) {
3260
		kmemcheck_slab_alloc(cachep, flags, objp, cachep->object_size);
3261 3262 3263
		if (unlikely(flags & __GFP_ZERO))
			memset(objp, 0, cachep->object_size);
	}
3264

3265
	memcg_kmem_put_cache(cachep);
3266 3267
	return objp;
}
3268 3269

/*
 * Caller needs to acquire correct kmem_cache_node's list_lock
 * @list: List of detached free slabs should be freed by caller
 */
static void free_block(struct kmem_cache *cachep, void **objpp,
			int nr_objects, int node, struct list_head *list)
L
Linus Torvalds 已提交
3275 3276
{
	int i;
3277
	struct kmem_cache_node *n = get_node(cachep, node);
L
Linus Torvalds 已提交
3278 3279

	for (i = 0; i < nr_objects; i++) {
3280
		void *objp;
3281
		struct page *page;
L
Linus Torvalds 已提交
3282

3283 3284 3285
		clear_obj_pfmemalloc(&objpp[i]);
		objp = objpp[i];

3286 3287
		page = virt_to_head_page(objp);
		list_del(&page->lru);
3288
		check_spinlock_acquired_node(cachep, node);
3289
		slab_put_obj(cachep, page, objp, node);
L
Linus Torvalds 已提交
3290
		STATS_DEC_ACTIVE(cachep);
3291
		n->free_objects++;
L
Linus Torvalds 已提交
3292 3293

		/* fixup slab chains */
3294
		if (page->active == 0) {
3295 3296
			if (n->free_objects > n->free_limit) {
				n->free_objects -= cachep->num;
3297
				list_add_tail(&page->lru, list);
L
Linus Torvalds 已提交
3298
			} else {
3299
				list_add(&page->lru, &n->slabs_free);
L
Linus Torvalds 已提交
3300 3301 3302 3303 3304 3305
			}
		} else {
			/* Unconditionally move a slab to the end of the
			 * partial list on free - maximum time for the
			 * other objects to be freed, too.
			 */
3306
			list_add_tail(&page->lru, &n->slabs_partial);
L
Linus Torvalds 已提交
3307 3308 3309 3310
		}
	}
}

3311
static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
L
Linus Torvalds 已提交
3312 3313
{
	int batchcount;
3314
	struct kmem_cache_node *n;
3315
	int node = numa_mem_id();
3316
	LIST_HEAD(list);
L
Linus Torvalds 已提交
3317 3318 3319 3320 3321 3322

	batchcount = ac->batchcount;
#if DEBUG
	BUG_ON(!batchcount || batchcount > ac->avail);
#endif
	check_irq_off();
3323
	n = get_node(cachep, node);
3324 3325 3326
	spin_lock(&n->list_lock);
	if (n->shared) {
		struct array_cache *shared_array = n->shared;
P
Pekka Enberg 已提交
3327
		int max = shared_array->limit - shared_array->avail;
L
Linus Torvalds 已提交
3328 3329 3330
		if (max) {
			if (batchcount > max)
				batchcount = max;
3331
			memcpy(&(shared_array->entry[shared_array->avail]),
P
Pekka Enberg 已提交
3332
			       ac->entry, sizeof(void *) * batchcount);
L
Linus Torvalds 已提交
3333 3334 3335 3336 3337
			shared_array->avail += batchcount;
			goto free_done;
		}
	}

3338
	free_block(cachep, ac->entry, batchcount, node, &list);
A
Andrew Morton 已提交
3339
free_done:
L
Linus Torvalds 已提交
3340 3341 3342 3343 3344
#if STATS
	{
		int i = 0;
		struct list_head *p;

3345 3346
		p = n->slabs_free.next;
		while (p != &(n->slabs_free)) {
3347
			struct page *page;
L
Linus Torvalds 已提交
3348

3349 3350
			page = list_entry(p, struct page, lru);
			BUG_ON(page->active);
L
Linus Torvalds 已提交
3351 3352 3353 3354 3355 3356 3357

			i++;
			p = p->next;
		}
		STATS_SET_FREEABLE(cachep, i);
	}
#endif
3358
	spin_unlock(&n->list_lock);
3359
	slabs_destroy(cachep, &list);
L
Linus Torvalds 已提交
3360
	ac->avail -= batchcount;
A
Andrew Morton 已提交
3361
	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
L
Linus Torvalds 已提交
3362 3363 3364
}

/*
 * Release an obj back to its cache. If the obj has a constructed state, it must
 * be in this state _before_ it is released.  Called with interrupts disabled.
 */
static inline void __cache_free(struct kmem_cache *cachep, void *objp,
				unsigned long caller)
{
	struct array_cache *ac = cpu_cache_get(cachep);

	check_irq_off();
	kmemleak_free_recursive(objp, cachep->flags);
	objp = cache_free_debugcheck(cachep, objp, caller);

	kmemcheck_slab_free(cachep, objp, cachep->object_size);

	/*
	 * Skip calling cache_free_alien() when the platform is not NUMA.
	 * This avoids the cache misses taken while accessing slabp (a
	 * per-page memory reference) to get the nodeid.  Instead, a global
	 * variable is used to skip the call, and that variable is most
	 * likely already present in the cache.
	 */
	if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
		return;

	if (ac->avail < ac->limit) {
		STATS_INC_FREEHIT(cachep);
	} else {
		STATS_INC_FREEMISS(cachep);
		cache_flusharray(cachep, ac);
	}

	ac_put_obj(cachep, ac, objp);
}

/**
 * kmem_cache_alloc - Allocate an object
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 *
 * Allocate an object from this cache.  The flags are only relevant
 * if the cache has no available objects.
 */
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	void *ret = slab_alloc(cachep, flags, _RET_IP_);

	trace_kmem_cache_alloc(_RET_IP_, ret,
			       cachep->object_size, cachep->size, flags);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc);
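
/*
 * Typical usage, sketched for illustration only (the cache name, the
 * struct foo type and the error handling are examples, not part of this
 * file):
 *
 *	struct kmem_cache *foo_cache;
 *	struct foo *obj;
 *
 *	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	obj = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	if (!obj)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free(foo_cache, obj);
 */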

#ifdef CONFIG_TRACING
void *
kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
	void *ret;

	ret = slab_alloc(cachep, flags, _RET_IP_);

	trace_kmalloc(_RET_IP_, ret,
		      size, cachep->size, flags);
	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_trace);
#endif

#ifdef CONFIG_NUMA
/**
 * kmem_cache_alloc_node - Allocate an object on the specified node
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 * @nodeid: node number of the target node.
 *
 * Identical to kmem_cache_alloc but it will allocate memory on the given
 * node, which can improve the performance for cpu bound structures.
 *
 * Fallback to other node is possible if __GFP_THISNODE is not set.
 */
void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);

	trace_kmem_cache_alloc_node(_RET_IP_, ret,
				    cachep->object_size, cachep->size,
				    flags, nodeid);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);

#ifdef CONFIG_TRACING
void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
				  gfp_t flags,
				  int nodeid,
				  size_t size)
{
	void *ret;

	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);

	trace_kmalloc_node(_RET_IP_, ret,
			   size, cachep->size,
			   flags, nodeid);
	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
#endif

static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
{
	struct kmem_cache *cachep;

	cachep = kmalloc_slab(size, flags);
	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
		return cachep;
	return kmem_cache_alloc_node_trace(cachep, flags, node, size);
}

void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __do_kmalloc_node(size, flags, node, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc_node);

void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
		int node, unsigned long caller)
{
	return __do_kmalloc_node(size, flags, node, caller);
}
EXPORT_SYMBOL(__kmalloc_node_track_caller);
#endif /* CONFIG_NUMA */
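
/*
 * Usage sketch for the node-aware entry points above (the size and node
 * choice are illustrative only):
 *
 *	void *buf = kmalloc_node(512, GFP_KERNEL, numa_node_id());
 *	...
 *	kfree(buf);
 *
 * With this allocator a kmalloc_node() call with a non-constant size
 * typically ends up in __kmalloc_node() via the slab.h wrappers.
 */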

/**
 * __do_kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @caller: return address of the caller, used for debug tracking
 */
static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
					  unsigned long caller)
{
	struct kmem_cache *cachep;
	void *ret;

	cachep = kmalloc_slab(size, flags);
	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
		return cachep;
	ret = slab_alloc(cachep, flags, caller);

	trace_kmalloc(caller, ret,
		      size, cachep->size, flags);

	return ret;
}

void *__kmalloc(size_t size, gfp_t flags)
{
	return __do_kmalloc(size, flags, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc);

void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
{
	return __do_kmalloc(size, flags, caller);
}
EXPORT_SYMBOL(__kmalloc_track_caller);

/**
 * kmem_cache_free - Deallocate an object
 * @cachep: The cache the allocation was from.
 * @objp: The previously allocated object.
 *
 * Free an object which was previously allocated from this
 * cache.
 */
void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	unsigned long flags;
	cachep = cache_from_obj(cachep, objp);
	if (!cachep)
		return;

	local_irq_save(flags);
	debug_check_no_locks_freed(objp, cachep->object_size);
	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
		debug_check_no_obj_freed(objp, cachep->object_size);
	__cache_free(cachep, objp, _RET_IP_);
	local_irq_restore(flags);

	trace_kmem_cache_free(_RET_IP_, objp);
}
EXPORT_SYMBOL(kmem_cache_free);

/**
 * kfree - free previously allocated memory
 * @objp: pointer returned by kmalloc.
 *
 * If @objp is NULL, no operation is performed.
 *
 * Don't free memory not originally allocated by kmalloc()
 * or you will run into trouble.
 */
void kfree(const void *objp)
{
	struct kmem_cache *c;
	unsigned long flags;

	trace_kfree(_RET_IP_, objp);

	if (unlikely(ZERO_OR_NULL_PTR(objp)))
		return;
	local_irq_save(flags);
	kfree_debugcheck(objp);
	c = virt_to_cache(objp);
	debug_check_no_locks_freed(objp, c->object_size);

	debug_check_no_obj_freed(objp, c->object_size);
	__cache_free(c, (void *)objp, _RET_IP_);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(kfree);
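
/*
 * kfree() pairs with kmalloc() and friends.  As the ZERO_OR_NULL_PTR()
 * check above shows, both NULL and the ZERO_SIZE_PTR cookie returned for
 * zero-length allocations are silently ignored.  Illustrative only:
 *
 *	char *p = kmalloc(len, GFP_KERNEL);
 *	if (!p)
 *		return -ENOMEM;
 *	...
 *	kfree(p);
 *
 * The kfree() here is safe even if 'len' was 0 and p is ZERO_SIZE_PTR.
 */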

/*
 * This initializes kmem_cache_node or resizes various caches for all nodes.
 */
static int alloc_kmem_cache_node(struct kmem_cache *cachep, gfp_t gfp)
{
	int node;
	struct kmem_cache_node *n;
	struct array_cache *new_shared;
	struct alien_cache **new_alien = NULL;

	for_each_online_node(node) {

		if (use_alien_caches) {
			new_alien = alloc_alien_cache(node, cachep->limit, gfp);
			if (!new_alien)
				goto fail;
		}

		new_shared = NULL;
		if (cachep->shared) {
			new_shared = alloc_arraycache(node,
				cachep->shared*cachep->batchcount,
					0xbaadf00d, gfp);
			if (!new_shared) {
				free_alien_cache(new_alien);
				goto fail;
			}
		}

		n = get_node(cachep, node);
		if (n) {
			struct array_cache *shared = n->shared;
			LIST_HEAD(list);

			spin_lock_irq(&n->list_lock);

			if (shared)
				free_block(cachep, shared->entry,
						shared->avail, node, &list);

			n->shared = new_shared;
			if (!n->alien) {
				n->alien = new_alien;
				new_alien = NULL;
			}
			n->free_limit = (1 + nr_cpus_node(node)) *
					cachep->batchcount + cachep->num;
			spin_unlock_irq(&n->list_lock);
			slabs_destroy(cachep, &list);
			kfree(shared);
			free_alien_cache(new_alien);
			continue;
		}
		n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
		if (!n) {
			free_alien_cache(new_alien);
			kfree(new_shared);
			goto fail;
		}

		kmem_cache_node_init(n);
		n->next_reap = jiffies + REAPTIMEOUT_NODE +
				((unsigned long)cachep) % REAPTIMEOUT_NODE;
		n->shared = new_shared;
		n->alien = new_alien;
		n->free_limit = (1 + nr_cpus_node(node)) *
					cachep->batchcount + cachep->num;
		cachep->node[node] = n;
	}
	return 0;

fail:
	if (!cachep->list.next) {
		/* Cache is not active yet. Roll back what we did */
		node--;
		while (node >= 0) {
			n = get_node(cachep, node);
			if (n) {
				kfree(n->shared);
				free_alien_cache(n->alien);
				kfree(n);
				cachep->node[node] = NULL;
			}
			node--;
		}
	}
	return -ENOMEM;
}
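
/*
 * Worked example for the free_limit formula above (numbers are purely
 * illustrative): with 4 CPUs on the node, batchcount == 16 and
 * num == 30 objects per slab, free_limit = (1 + 4) * 16 + 30 = 110,
 * i.e. roughly one batch of cached free objects per CPU plus one spare
 * slab before free_block() starts handing empty slabs back for
 * destruction.
 */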

/* Always called with the slab_mutex held */
static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
				int batchcount, int shared, gfp_t gfp)
{
	struct array_cache __percpu *cpu_cache, *prev;
	int cpu;

	cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount);
	if (!cpu_cache)
		return -ENOMEM;

	prev = cachep->cpu_cache;
	cachep->cpu_cache = cpu_cache;
	kick_all_cpus_sync();

	check_irq_on();
	cachep->batchcount = batchcount;
	cachep->limit = limit;
	cachep->shared = shared;

	if (!prev)
		goto alloc_node;

	for_each_online_cpu(cpu) {
		LIST_HEAD(list);
		int node;
		struct kmem_cache_node *n;
		struct array_cache *ac = per_cpu_ptr(prev, cpu);

		node = cpu_to_mem(cpu);
		n = get_node(cachep, node);
		spin_lock_irq(&n->list_lock);
		free_block(cachep, ac->entry, ac->avail, node, &list);
		spin_unlock_irq(&n->list_lock);
		slabs_destroy(cachep, &list);
	}
	free_percpu(prev);

alloc_node:
	return alloc_kmem_cache_node(cachep, gfp);
}
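
/*
 * Ordering note for __do_tune_cpucache(): the new percpu arrays are
 * published first (cachep->cpu_cache = cpu_cache), then
 * kick_all_cpus_sync() makes every CPU take an IPI so none of them can
 * still be dereferencing the old arrays from an allocation path with
 * interrupts disabled.  Only after that are the leftover objects drained
 * with free_block() and the old percpu allocation freed.
 */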

static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
				int batchcount, int shared, gfp_t gfp)
{
	int ret;
	struct kmem_cache *c;

	ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);

	if (slab_state < FULL)
		return ret;

	if ((ret < 0) || !is_root_cache(cachep))
		return ret;

	lockdep_assert_held(&slab_mutex);
	for_each_memcg_cache(c, cachep) {
		/* return value determined by the root cache only */
		__do_tune_cpucache(c, limit, batchcount, shared, gfp);
	}

	return ret;
}

/* Called with slab_mutex held always */
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
{
	int err;
	int limit = 0;
	int shared = 0;
	int batchcount = 0;

	if (!is_root_cache(cachep)) {
		struct kmem_cache *root = memcg_root_cache(cachep);
		limit = root->limit;
		shared = root->shared;
		batchcount = root->batchcount;
	}

	if (limit && shared && batchcount)
		goto skip_setup;
	/*
	 * The head array serves three purposes:
	 * - create a LIFO ordering, i.e. return objects that are cache-warm
	 * - reduce the number of spinlock operations.
	 * - reduce the number of linked list operations on the slab and
	 *   bufctl chains: array operations are cheaper.
	 * The numbers are guessed, we should auto-tune as described by
	 * Bonwick.
	 */
	if (cachep->size > 131072)
		limit = 1;
	else if (cachep->size > PAGE_SIZE)
		limit = 8;
	else if (cachep->size > 1024)
		limit = 24;
	else if (cachep->size > 256)
		limit = 54;
	else
		limit = 120;

	/*
	 * CPU bound tasks (e.g. network routing) can exhibit cpu bound
	 * allocation behaviour: Most allocs on one cpu, most free operations
	 * on another cpu. For these cases, an efficient object passing between
	 * cpus is necessary. This is provided by a shared array. The array
	 * replaces Bonwick's magazine layer.
	 * On uniprocessor, it's functionally equivalent (but less efficient)
	 * to a larger limit. Thus disabled by default.
	 */
	shared = 0;
	if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
		shared = 8;

#if DEBUG
	/*
	 * With debugging enabled, a large batchcount leads to excessively
	 * long periods with local interrupts disabled. Limit the batchcount.
	 */
	if (limit > 32)
		limit = 32;
#endif
	batchcount = (limit + 1) / 2;
skip_setup:
	err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
	if (err)
		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
		       cachep->name, -err);
	return err;
}
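
/*
 * Example of the sizing heuristic above (values follow directly from the
 * table, they are not a guarantee): a cache of 192-byte objects falls
 * into the "<= 256" bucket, so limit = 120 and
 * batchcount = (120 + 1) / 2 = 60; a PAGE_SIZE-sized object on a 4K-page
 * system lands in the "> 1024" bucket instead and gets limit = 24,
 * batchcount = 12.
 */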

/*
 * Drain an array if it contains any elements, taking the node lock only if
 * necessary. Note that the node listlock also protects the array_cache
 * if drain_array() is used on the shared array.
 */
static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
			 struct array_cache *ac, int force, int node)
{
	LIST_HEAD(list);
	int tofree;

	if (!ac || !ac->avail)
		return;
	if (ac->touched && !force) {
		ac->touched = 0;
	} else {
		spin_lock_irq(&n->list_lock);
		if (ac->avail) {
			tofree = force ? ac->avail : (ac->limit + 4) / 5;
			if (tofree > ac->avail)
				tofree = (ac->avail + 1) / 2;
			free_block(cachep, ac->entry, tofree, node, &list);
			ac->avail -= tofree;
			memmove(ac->entry, &(ac->entry[tofree]),
				sizeof(void *) * ac->avail);
		}
		spin_unlock_irq(&n->list_lock);
		slabs_destroy(cachep, &list);
	}
}

/**
 * cache_reap - Reclaim memory from caches.
 * @w: work descriptor
 *
 * Called from workqueue/eventd every few seconds.
 * Purpose:
 * - clear the per-cpu caches for this CPU.
 * - return freeable pages to the main free memory pool.
 *
 * If we cannot acquire the cache chain mutex then just give up - we'll try
 * again on the next iteration.
 */
static void cache_reap(struct work_struct *w)
{
	struct kmem_cache *searchp;
	struct kmem_cache_node *n;
	int node = numa_mem_id();
	struct delayed_work *work = to_delayed_work(w);

	if (!mutex_trylock(&slab_mutex))
		/* Give up. Setup the next iteration. */
		goto out;

	list_for_each_entry(searchp, &slab_caches, list) {
		check_irq_on();

		/*
		 * We only take the node lock if absolutely necessary and we
		 * have established with reasonable certainty that
		 * we can do some work if the lock was obtained.
		 */
		n = get_node(searchp, node);

		reap_alien(searchp, n);

		drain_array(searchp, n, cpu_cache_get(searchp), 0, node);

		/*
		 * These are racy checks but it does not matter
		 * if we skip one check or scan twice.
		 */
		if (time_after(n->next_reap, jiffies))
			goto next;

		n->next_reap = jiffies + REAPTIMEOUT_NODE;

		drain_array(searchp, n, n->shared, 0, node);

		if (n->free_touched)
			n->free_touched = 0;
		else {
			int freed;

			freed = drain_freelist(searchp, n, (n->free_limit +
				5 * searchp->num - 1) / (5 * searchp->num));
			STATS_ADD_REAPED(searchp, freed);
		}
next:
		cond_resched();
	}
	check_irq_on();
	mutex_unlock(&slab_mutex);
	next_reap_node();
out:
	/* Set up the next iteration */
	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
}

#ifdef CONFIG_SLABINFO
void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
{
	struct page *page;
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs = 0;
	unsigned long num_slabs, free_objects = 0, shared_avail = 0;
	const char *name;
	char *error = NULL;
	int node;
	struct kmem_cache_node *n;

	active_objs = 0;
	num_slabs = 0;
	for_each_kmem_cache_node(cachep, node, n) {

		check_irq_on();
		spin_lock_irq(&n->list_lock);

		list_for_each_entry(page, &n->slabs_full, lru) {
			if (page->active != cachep->num && !error)
				error = "slabs_full accounting error";
			active_objs += cachep->num;
			active_slabs++;
		}
		list_for_each_entry(page, &n->slabs_partial, lru) {
			if (page->active == cachep->num && !error)
				error = "slabs_partial accounting error";
			if (!page->active && !error)
				error = "slabs_partial accounting error";
			active_objs += page->active;
			active_slabs++;
		}
		list_for_each_entry(page, &n->slabs_free, lru) {
			if (page->active && !error)
				error = "slabs_free accounting error";
			num_slabs++;
		}
		free_objects += n->free_objects;
		if (n->shared)
			shared_avail += n->shared->avail;

		spin_unlock_irq(&n->list_lock);
	}
	num_slabs += active_slabs;
	num_objs = num_slabs * cachep->num;
	if (num_objs - active_objs != free_objects && !error)
		error = "free_objects accounting error";

	name = cachep->name;
	if (error)
		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);

	sinfo->active_objs = active_objs;
	sinfo->num_objs = num_objs;
	sinfo->active_slabs = active_slabs;
	sinfo->num_slabs = num_slabs;
	sinfo->shared_avail = shared_avail;
	sinfo->limit = cachep->limit;
	sinfo->batchcount = cachep->batchcount;
	sinfo->shared = cachep->shared;
	sinfo->objects_per_slab = cachep->num;
	sinfo->cache_order = cachep->gfporder;
}

void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
{
#if STATS
	{			/* node stats */
		unsigned long high = cachep->high_mark;
		unsigned long allocs = cachep->num_allocations;
		unsigned long grown = cachep->grown;
		unsigned long reaped = cachep->reaped;
		unsigned long errors = cachep->errors;
		unsigned long max_freeable = cachep->max_freeable;
		unsigned long node_allocs = cachep->node_allocs;
		unsigned long node_frees = cachep->node_frees;
		unsigned long overflows = cachep->node_overflow;

		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
			   "%4lu %4lu %4lu %4lu %4lu",
			   allocs, high, grown,
			   reaped, errors, max_freeable, node_allocs,
			   node_frees, overflows);
	}
	/* cpu stats */
	{
		unsigned long allochit = atomic_read(&cachep->allochit);
		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
		unsigned long freehit = atomic_read(&cachep->freehit);
		unsigned long freemiss = atomic_read(&cachep->freemiss);

		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
			   allochit, allocmiss, freehit, freemiss);
	}
#endif
}

#define MAX_SLABINFO_WRITE 128
/**
 * slabinfo_write - Tuning for the slab allocator
 * @file: unused
 * @buffer: user buffer
 * @count: data length
 * @ppos: unused
 */
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos)
{
	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
	int limit, batchcount, shared, res;
	struct kmem_cache *cachep;

	if (count > MAX_SLABINFO_WRITE)
		return -EINVAL;
	if (copy_from_user(&kbuf, buffer, count))
		return -EFAULT;
	kbuf[MAX_SLABINFO_WRITE] = '\0';

	tmp = strchr(kbuf, ' ');
	if (!tmp)
		return -EINVAL;
	*tmp = '\0';
	tmp++;
	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
		return -EINVAL;

	/* Find the cache in the chain of caches. */
	mutex_lock(&slab_mutex);
	res = -EINVAL;
	list_for_each_entry(cachep, &slab_caches, list) {
		if (!strcmp(cachep->name, kbuf)) {
			if (limit < 1 || batchcount < 1 ||
					batchcount > limit || shared < 0) {
				res = 0;
			} else {
				res = do_tune_cpucache(cachep, limit,
						       batchcount, shared,
						       GFP_KERNEL);
			}
			break;
		}
	}
	mutex_unlock(&slab_mutex);
	if (res >= 0)
		res = count;
	return res;
}
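
/*
 * The expected input is "<cache name> <limit> <batchcount> <shared>", so
 * a tuning request can be issued from userspace roughly like this (cache
 * name and numbers are only an example):
 *
 *	echo "dentry 120 60 8" > /proc/slabinfo
 *
 * Out-of-range values (limit < 1, batchcount < 1, batchcount > limit or
 * shared < 0) are accepted but ignored, as the res = 0 branch above shows.
 */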

#ifdef CONFIG_DEBUG_SLAB_LEAK
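/*
 * add_caller() below records one allocation call site in the table at 'n':
 * n[0] is the table capacity, n[1] the number of entries, and the entries
 * are (address, count) pairs kept sorted by address so a binary search can
 * be used.  It returns 0 once the table is full, which leaks_show() uses
 * as the signal to retry with a larger buffer.
 */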

static inline int add_caller(unsigned long *n, unsigned long v)
{
	unsigned long *p;
	int l;
	if (!v)
		return 1;
	l = n[1];
	p = n + 2;
	while (l) {
		int i = l/2;
		unsigned long *q = p + 2 * i;
		if (*q == v) {
			q[1]++;
			return 1;
		}
		if (*q > v) {
			l = i;
		} else {
			p = q + 2;
			l -= i + 1;
		}
	}
	if (++n[1] == n[0])
		return 0;
	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
	p[0] = v;
	p[1] = 1;
	return 1;
}

static void handle_slab(unsigned long *n, struct kmem_cache *c,
						struct page *page)
{
	void *p;
	int i;

	if (n[0] == n[1])
		return;
	for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
		if (get_obj_status(page, i) != OBJECT_ACTIVE)
			continue;

		if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
			return;
	}
}

static void show_symbol(struct seq_file *m, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	unsigned long offset, size;
	char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];

	if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
		if (modname[0])
			seq_printf(m, " [%s]", modname);
		return;
	}
#endif
	seq_printf(m, "%p", (void *)address);
}

static int leaks_show(struct seq_file *m, void *p)
{
	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
	struct page *page;
	struct kmem_cache_node *n;
	const char *name;
	unsigned long *x = m->private;
	int node;
	int i;

	if (!(cachep->flags & SLAB_STORE_USER))
		return 0;
	if (!(cachep->flags & SLAB_RED_ZONE))
		return 0;

	/* OK, we can do it */

	x[1] = 0;

	for_each_kmem_cache_node(cachep, node, n) {

		check_irq_on();
		spin_lock_irq(&n->list_lock);

		list_for_each_entry(page, &n->slabs_full, lru)
			handle_slab(x, cachep, page);
		list_for_each_entry(page, &n->slabs_partial, lru)
			handle_slab(x, cachep, page);
		spin_unlock_irq(&n->list_lock);
	}
	name = cachep->name;
	if (x[0] == x[1]) {
		/* Increase the buffer size */
		mutex_unlock(&slab_mutex);
		m->private = kzalloc(x[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
		if (!m->private) {
			/* Too bad, we are really out */
			m->private = x;
			mutex_lock(&slab_mutex);
			return -ENOMEM;
		}
		*(unsigned long *)m->private = x[0] * 2;
		kfree(x);
		mutex_lock(&slab_mutex);
		/* Now make sure this entry will be retried */
		m->count = m->size;
		return 0;
	}
	for (i = 0; i < x[1]; i++) {
		seq_printf(m, "%s: %lu ", name, x[2*i+3]);
		show_symbol(m, x[2*i+2]);
		seq_putc(m, '\n');
	}

	return 0;
}

static const struct seq_operations slabstats_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = leaks_show,
};

static int slabstats_open(struct inode *inode, struct file *file)
{
	unsigned long *n;

	n = __seq_open_private(file, &slabstats_op, PAGE_SIZE);
	if (!n)
		return -ENOMEM;

	*n = PAGE_SIZE / (2 * sizeof(unsigned long));

	return 0;
}

static const struct file_operations proc_slabstats_operations = {
	.open		= slabstats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif

static int __init slab_proc_init(void)
{
#ifdef CONFIG_DEBUG_SLAB_LEAK
	proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
#endif
	return 0;
}
module_init(slab_proc_init);
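/*
 * With CONFIG_DEBUG_SLAB_LEAK enabled, the file created above lists the
 * outstanding allocations of caches built with SLAB_STORE_USER (and
 * SLAB_RED_ZONE), grouped by caller, and can simply be read:
 *
 *	cat /proc/slab_allocators
 *
 * Each line printed by leaks_show() has roughly the form
 * "<cache>: <count> <symbol>+<offset>/<size> [module]".
 */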
#endif

/**
 * ksize - get the actual amount of memory allocated for a given object
 * @objp: Pointer to the object
 *
 * kmalloc may internally round up allocations and return more memory
 * than requested. ksize() can be used to determine the actual amount of
 * memory allocated. The caller may use this additional memory, even though
 * a smaller amount of memory was initially specified with the kmalloc call.
 * The caller must guarantee that objp points to a valid object previously
 * allocated with either kmalloc() or kmem_cache_alloc(). The object
 * must not be freed during the duration of the call.
 */
size_t ksize(const void *objp)
{
	BUG_ON(!objp);
	if (unlikely(objp == ZERO_SIZE_PTR))
		return 0;

	return virt_to_cache(objp)->object_size;
}
EXPORT_SYMBOL(ksize);
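
/*
 * Illustrative use of ksize(); the exact numbers depend on the
 * configuration and are only an example (a 17-byte request is typically
 * served from the 32-byte kmalloc cache):
 *
 *	char *p = kmalloc(17, GFP_KERNEL);
 *	size_t usable = ksize(p);	(typically 32 here)
 *
 * The caller may legitimately use all 'usable' bytes, not just the 17 it
 * asked for.
 */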