slab.c 108.6 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28
/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 * 	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists out of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means, that your constructor is used only for newly allocated
S
Simon Arlott 已提交
29
 * slabs and you must pass objects with the same initializations to
L
Linus Torvalds 已提交
30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
A
Andrew Morton 已提交
53
 * The c_cpuarray may not be read with enabled local interrupts -
L
Linus Torvalds 已提交
54 55 56 57
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
58
 *  Several members in struct kmem_cache and struct slab never change, they
L
Linus Torvalds 已提交
59 60 61 62 63 64 65 66 67 68 69 70
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *  	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
71
 *	The global cache-chain is protected by the mutex 'slab_mutex'.
L
Linus Torvalds 已提交
72 73 74 75 76 77
 *	The sem is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
78 79 80 81 82 83 84 85 86
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
L
Linus Torvalds 已提交
87 88 89 90
 */

#include	<linux/slab.h>
#include	<linux/mm.h>
91
#include	<linux/poison.h>
L
Linus Torvalds 已提交
92 93 94 95 96
#include	<linux/swap.h>
#include	<linux/cache.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
97
#include	<linux/cpuset.h>
98
#include	<linux/proc_fs.h>
L
Linus Torvalds 已提交
99 100 101 102 103 104 105
#include	<linux/seq_file.h>
#include	<linux/notifier.h>
#include	<linux/kallsyms.h>
#include	<linux/cpu.h>
#include	<linux/sysctl.h>
#include	<linux/module.h>
#include	<linux/rcupdate.h>
106
#include	<linux/string.h>
107
#include	<linux/uaccess.h>
108
#include	<linux/nodemask.h>
109
#include	<linux/kmemleak.h>
110
#include	<linux/mempolicy.h>
I
Ingo Molnar 已提交
111
#include	<linux/mutex.h>
112
#include	<linux/fault-inject.h>
I
Ingo Molnar 已提交
113
#include	<linux/rtmutex.h>
114
#include	<linux/reciprocal_div.h>
115
#include	<linux/debugobjects.h>
P
Pekka Enberg 已提交
116
#include	<linux/kmemcheck.h>
117
#include	<linux/memory.h>
118
#include	<linux/prefetch.h>
L
Linus Torvalds 已提交
119

120 121
#include	<net/sock.h>

L
Linus Torvalds 已提交
122 123 124 125
#include	<asm/cacheflush.h>
#include	<asm/tlbflush.h>
#include	<asm/page.h>

126 127
#include <trace/events/kmem.h>

128 129
#include	"internal.h"

130 131
#include	"slab.h"

L
Linus Torvalds 已提交
132
/*
133
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
L
Linus Torvalds 已提交
134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)
D
David Woodhouse 已提交
154
#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))
L
Linus Torvalds 已提交
155 156 157 158 159

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

160 161 162 163 164 165 166 167 168
#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)

#if FREELIST_BYTE_INDEX
typedef unsigned char freelist_idx_t;
#else
typedef unsigned short freelist_idx_t;
#endif

169
#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)
170

171 172 173 174 175 176
/*
 * true if a page was allocated from pfmemalloc reserves for network-based
 * swap
 */
static bool pfmemalloc_active __read_mostly;

L
Linus Torvalds 已提交
177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193
/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
194
	void *entry[];	/*
A
Andrew Morton 已提交
195 196 197
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
198 199 200 201
			 *
			 * Entries should not be directly dereferenced as
			 * entries belonging to slabs marked pfmemalloc will
			 * have the lower bits set SLAB_OBJ_PFMEMALLOC
A
Andrew Morton 已提交
202
			 */
L
Linus Torvalds 已提交
203 204
};

J
Joonsoo Kim 已提交
205 206 207 208 209
struct alien_cache {
	spinlock_t lock;
	struct array_cache ac;
};

210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226
#define SLAB_OBJ_PFMEMALLOC	1
static inline bool is_obj_pfmemalloc(void *objp)
{
	return (unsigned long)objp & SLAB_OBJ_PFMEMALLOC;
}

static inline void set_obj_pfmemalloc(void **objp)
{
	*objp = (void *)((unsigned long)*objp | SLAB_OBJ_PFMEMALLOC);
	return;
}

static inline void clear_obj_pfmemalloc(void **objp)
{
	*objp = (void *)((unsigned long)*objp & ~SLAB_OBJ_PFMEMALLOC);
}

A
Andrew Morton 已提交
227 228 229
/*
 * bootstrap: The caches do not work without cpuarrays anymore, but the
 * cpuarrays are allocated from the generic caches...
L
Linus Torvalds 已提交
230 231 232 233
 */
#define BOOT_CPUCACHE_ENTRIES	1
struct arraycache_init {
	struct array_cache cache;
P
Pekka Enberg 已提交
234
	void *entries[BOOT_CPUCACHE_ENTRIES];
L
Linus Torvalds 已提交
235 236
};

237 238 239
/*
 * Need this for bootstrapping a per node allocator.
 */
240
#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
241
static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
242
#define	CACHE_CACHE 0
243
#define	SIZE_AC MAX_NUMNODES
244
#define	SIZE_NODE (2 * MAX_NUMNODES)
245

246
static int drain_freelist(struct kmem_cache *cache,
247
			struct kmem_cache_node *n, int tofree);
248
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
249 250
			int node, struct list_head *list);
static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
251
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
252
static void cache_reap(struct work_struct *unused);
253

254 255
static int slab_early_init = 1;

256
#define INDEX_AC kmalloc_index(sizeof(struct arraycache_init))
257
#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))
L
Linus Torvalds 已提交
258

259
static void kmem_cache_node_init(struct kmem_cache_node *parent)
260 261 262 263 264 265
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->shared = NULL;
	parent->alien = NULL;
266
	parent->colour_next = 0;
267 268 269 270 271
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

A
Andrew Morton 已提交
272 273 274
#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
275
		list_splice(&get_node(cachep, nodeid)->slab, listp);	\
276 277
	} while (0)

A
Andrew Morton 已提交
278 279
#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
280 281 282 283
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)
L
Linus Torvalds 已提交
284 285 286 287 288

#define CFLGS_OFF_SLAB		(0x80000000UL)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
A
Andrew Morton 已提交
289 290 291
/*
 * Optimization question: fewer reaps means less probability for unnessary
 * cpucache drain/refill cycles.
L
Linus Torvalds 已提交
292
 *
A
Adrian Bunk 已提交
293
 * OTOH the cpuarrays can contain lots of objects,
L
Linus Torvalds 已提交
294 295
 * which could lock up otherwise freeable slabs.
 */
296 297
#define REAPTIMEOUT_AC		(2*HZ)
#define REAPTIMEOUT_NODE	(4*HZ)
L
Linus Torvalds 已提交
298 299 300 301 302 303

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
304
#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
A
Andrew Morton 已提交
305 306 307 308 309
#define	STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
L
Linus Torvalds 已提交
310 311
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
312
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
313
#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
A
Andrew Morton 已提交
314 315 316 317 318
#define	STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
L
Linus Torvalds 已提交
319 320 321 322 323 324 325 326 327
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
328
#define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
L
Linus Torvalds 已提交
329 330 331
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
332
#define	STATS_INC_NODEFREES(x)	do { } while (0)
333
#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
A
Andrew Morton 已提交
334
#define	STATS_SET_FREEABLE(x, i) do { } while (0)
L
Linus Torvalds 已提交
335 336 337 338 339 340 341 342
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG

A
Andrew Morton 已提交
343 344
/*
 * memory layout of objects:
L
Linus Torvalds 已提交
345
 * 0		: objp
346
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
L
Linus Torvalds 已提交
347 348
 * 		the end of an object is aligned with the end of the real
 * 		allocation. Catches writes behind the end of the allocation.
349
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
L
Linus Torvalds 已提交
350
 * 		redzone word.
351
 * cachep->obj_offset: The real object.
352 353
 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->size - 1* BYTES_PER_WORD: last caller address
A
Andrew Morton 已提交
354
 *					[BYTES_PER_WORD long]
L
Linus Torvalds 已提交
355
 */
356
static int obj_offset(struct kmem_cache *cachep)
L
Linus Torvalds 已提交
357
{
358
	return cachep->obj_offset;
L
Linus Torvalds 已提交
359 360
}

361
static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
L
Linus Torvalds 已提交
362 363
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
364 365
	return (unsigned long long*) (objp + obj_offset(cachep) -
				      sizeof(unsigned long long));
L
Linus Torvalds 已提交
366 367
}

368
static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
L
Linus Torvalds 已提交
369 370 371
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
372
		return (unsigned long long *)(objp + cachep->size -
373
					      sizeof(unsigned long long) -
D
David Woodhouse 已提交
374
					      REDZONE_ALIGN);
375
	return (unsigned long long *) (objp + cachep->size -
376
				       sizeof(unsigned long long));
L
Linus Torvalds 已提交
377 378
}

379
static void **dbg_userword(struct kmem_cache *cachep, void *objp)
L
Linus Torvalds 已提交
380 381
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
382
	return (void **)(objp + cachep->size - BYTES_PER_WORD);
L
Linus Torvalds 已提交
383 384 385 386
}

#else

387
#define obj_offset(x)			0
388 389
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
L
Linus Torvalds 已提交
390 391 392 393
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426
#define OBJECT_FREE (0)
#define OBJECT_ACTIVE (1)

#ifdef CONFIG_DEBUG_SLAB_LEAK

static void set_obj_status(struct page *page, int idx, int val)
{
	int freelist_size;
	char *status;
	struct kmem_cache *cachep = page->slab_cache;

	freelist_size = cachep->num * sizeof(freelist_idx_t);
	status = (char *)page->freelist + freelist_size;
	status[idx] = val;
}

static inline unsigned int get_obj_status(struct page *page, int idx)
{
	int freelist_size;
	char *status;
	struct kmem_cache *cachep = page->slab_cache;

	freelist_size = cachep->num * sizeof(freelist_idx_t);
	status = (char *)page->freelist + freelist_size;

	return status[idx];
}

#else
static inline void set_obj_status(struct page *page, int idx, int val) {}

#endif

L
Linus Torvalds 已提交
427
/*
428 429
 * Do not go above this order unless 0 objects fit into the slab or
 * overridden on the command line.
L
Linus Torvalds 已提交
430
 */
431 432 433
#define	SLAB_MAX_ORDER_HI	1
#define	SLAB_MAX_ORDER_LO	0
static int slab_max_order = SLAB_MAX_ORDER_LO;
434
static bool slab_max_order_set __initdata;
L
Linus Torvalds 已提交
435

436 437
static inline struct kmem_cache *virt_to_cache(const void *obj)
{
438
	struct page *page = virt_to_head_page(obj);
C
Christoph Lameter 已提交
439
	return page->slab_cache;
440 441
}

442
static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
443 444
				 unsigned int idx)
{
445
	return page->s_mem + cache->size * idx;
446 447
}

448
/*
449 450 451
 * We want to avoid an expensive divide : (offset / cache->size)
 *   Using the fact that size is a constant for a particular cache,
 *   we can replace (offset / cache->size) by
452 453 454
 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
455
					const struct page *page, void *obj)
456
{
457
	u32 offset = (obj - page->s_mem);
458
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
459 460
}

L
Linus Torvalds 已提交
461
static struct arraycache_init initarray_generic =
P
Pekka Enberg 已提交
462
    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
L
Linus Torvalds 已提交
463 464

/* internal cache of cache description objs */
465
static struct kmem_cache kmem_cache_boot = {
P
Pekka Enberg 已提交
466 467 468
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
469
	.size = sizeof(struct kmem_cache),
P
Pekka Enberg 已提交
470
	.name = "kmem_cache",
L
Linus Torvalds 已提交
471 472
};

473 474
#define BAD_ALIEN_MAGIC 0x01020304ul

475
static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
L
Linus Torvalds 已提交
476

477
static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
L
Linus Torvalds 已提交
478 479 480 481
{
	return cachep->array[smp_processor_id()];
}

482 483 484 485 486 487 488 489 490 491 492 493 494 495
static size_t calculate_freelist_size(int nr_objs, size_t align)
{
	size_t freelist_size;

	freelist_size = nr_objs * sizeof(freelist_idx_t);
	if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
		freelist_size += nr_objs * sizeof(char);

	if (align)
		freelist_size = ALIGN(freelist_size, align);

	return freelist_size;
}

496 497
static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
				size_t idx_size, size_t align)
L
Linus Torvalds 已提交
498
{
499
	int nr_objs;
500
	size_t remained_size;
501
	size_t freelist_size;
502
	int extra_space = 0;
503

504 505
	if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
		extra_space = sizeof(char);
506 507 508 509 510 511 512 513
	/*
	 * Ignore padding for the initial guess. The padding
	 * is at most @align-1 bytes, and @buffer_size is at
	 * least @align. In the worst case, this result will
	 * be one greater than the number of objects that fit
	 * into the memory allocation when taking the padding
	 * into account.
	 */
514
	nr_objs = slab_size / (buffer_size + idx_size + extra_space);
515 516 517 518 519

	/*
	 * This calculated number will be either the right
	 * amount, or one greater than what we want.
	 */
520 521 522
	remained_size = slab_size - nr_objs * buffer_size;
	freelist_size = calculate_freelist_size(nr_objs, align);
	if (remained_size < freelist_size)
523 524 525
		nr_objs--;

	return nr_objs;
526
}
L
Linus Torvalds 已提交
527

A
Andrew Morton 已提交
528 529 530
/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
531 532 533 534 535 536 537
static void cache_estimate(unsigned long gfporder, size_t buffer_size,
			   size_t align, int flags, size_t *left_over,
			   unsigned int *num)
{
	int nr_objs;
	size_t mgmt_size;
	size_t slab_size = PAGE_SIZE << gfporder;
L
Linus Torvalds 已提交
538

539 540 541 542 543
	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
J
Joonsoo Kim 已提交
544
	 * - One unsigned int for each object
545 546 547 548 549 550 551 552 553 554 555 556 557
	 * - Padding to respect alignment of @align
	 * - @buffer_size bytes for each object
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & CFLGS_OFF_SLAB) {
		mgmt_size = 0;
		nr_objs = slab_size / buffer_size;

	} else {
558
		nr_objs = calculate_nr_objs(slab_size, buffer_size,
559
					sizeof(freelist_idx_t), align);
560
		mgmt_size = calculate_freelist_size(nr_objs, align);
561 562 563
	}
	*num = nr_objs;
	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
L
Linus Torvalds 已提交
564 565
}

566
#if DEBUG
567
#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
L
Linus Torvalds 已提交
568

A
Andrew Morton 已提交
569 570
static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
L
Linus Torvalds 已提交
571 572
{
	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
P
Pekka Enberg 已提交
573
	       function, cachep->name, msg);
L
Linus Torvalds 已提交
574
	dump_stack();
575
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
L
Linus Torvalds 已提交
576
}
577
#endif
L
Linus Torvalds 已提交
578

579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594
/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line
  */

static int use_alien_caches __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
	use_alien_caches = 0;
	return 1;
}
__setup("noaliencache", noaliencache_setup);

595 596 597 598 599 600 601 602 603 604 605
static int __init slab_max_order_setup(char *str)
{
	get_option(&str, &slab_max_order);
	slab_max_order = slab_max_order < 0 ? 0 :
				min(slab_max_order, MAX_ORDER - 1);
	slab_max_order_set = true;

	return 1;
}
__setup("slab_max_order=", slab_max_order_setup);

606 607 608 609 610 611 612
#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
613
static DEFINE_PER_CPU(unsigned long, slab_reap_node);
614 615 616 617 618

static void init_reap_node(int cpu)
{
	int node;

619
	node = next_node(cpu_to_mem(cpu), node_online_map);
620
	if (node == MAX_NUMNODES)
621
		node = first_node(node_online_map);
622

623
	per_cpu(slab_reap_node, cpu) = node;
624 625 626 627
}

static void next_reap_node(void)
{
628
	int node = __this_cpu_read(slab_reap_node);
629 630 631 632

	node = next_node(node, node_online_map);
	if (unlikely(node >= MAX_NUMNODES))
		node = first_node(node_online_map);
633
	__this_cpu_write(slab_reap_node, node);
634 635 636 637 638 639 640
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

L
Linus Torvalds 已提交
641 642 643 644 645 646 647
/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
648
static void start_cpu_timer(int cpu)
L
Linus Torvalds 已提交
649
{
650
	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
L
Linus Torvalds 已提交
651 652 653 654 655 656

	/*
	 * When this gets called from do_initcalls via cpucache_init(),
	 * init_workqueues() has already run, so keventd will be setup
	 * at that time.
	 */
657
	if (keventd_up() && reap_work->work.func == NULL) {
658
		init_reap_node(cpu);
659
		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
660 661
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
L
Linus Torvalds 已提交
662 663 664
	}
}

665
static void init_arraycache(struct array_cache *ac, int limit, int batch)
L
Linus Torvalds 已提交
666
{
667 668
	/*
	 * The array_cache structures contain pointers to free object.
L
Lucas De Marchi 已提交
669
	 * However, when such objects are allocated or transferred to another
670 671 672 673
	 * cache the pointers are not cleared and they could be counted as
	 * valid references during a kmemleak scan. Therefore, kmemleak must
	 * not scan such objects.
	 */
674 675 676 677 678 679
	kmemleak_no_scan(ac);
	if (ac) {
		ac->avail = 0;
		ac->limit = limit;
		ac->batchcount = batch;
		ac->touched = 0;
L
Linus Torvalds 已提交
680
	}
681 682 683 684 685
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount, gfp_t gfp)
{
686
	size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
687 688 689 690 691
	struct array_cache *ac = NULL;

	ac = kmalloc_node(memsize, gfp, node);
	init_arraycache(ac, entries, batchcount);
	return ac;
L
Linus Torvalds 已提交
692 693
}

694
static inline bool is_slab_pfmemalloc(struct page *page)
695 696 697 698 699 700 701 702
{
	return PageSlabPfmemalloc(page);
}

/* Clears pfmemalloc_active if no slabs have pfmalloc set */
static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
						struct array_cache *ac)
{
703
	struct kmem_cache_node *n = get_node(cachep, numa_mem_id());
704
	struct page *page;
705 706 707 708 709
	unsigned long flags;

	if (!pfmemalloc_active)
		return;

710
	spin_lock_irqsave(&n->list_lock, flags);
711 712
	list_for_each_entry(page, &n->slabs_full, lru)
		if (is_slab_pfmemalloc(page))
713 714
			goto out;

715 716
	list_for_each_entry(page, &n->slabs_partial, lru)
		if (is_slab_pfmemalloc(page))
717 718
			goto out;

719 720
	list_for_each_entry(page, &n->slabs_free, lru)
		if (is_slab_pfmemalloc(page))
721 722 723 724
			goto out;

	pfmemalloc_active = false;
out:
725
	spin_unlock_irqrestore(&n->list_lock, flags);
726 727
}

728
static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
729 730 731 732 733 734 735
						gfp_t flags, bool force_refill)
{
	int i;
	void *objp = ac->entry[--ac->avail];

	/* Ensure the caller is allowed to use objects from PFMEMALLOC slab */
	if (unlikely(is_obj_pfmemalloc(objp))) {
736
		struct kmem_cache_node *n;
737 738 739 740 741 742 743

		if (gfp_pfmemalloc_allowed(flags)) {
			clear_obj_pfmemalloc(&objp);
			return objp;
		}

		/* The caller cannot use PFMEMALLOC objects, find another one */
744
		for (i = 0; i < ac->avail; i++) {
745 746 747 748 749 750 751 752 753 754 755 756 757
			/* If a !PFMEMALLOC object is found, swap them */
			if (!is_obj_pfmemalloc(ac->entry[i])) {
				objp = ac->entry[i];
				ac->entry[i] = ac->entry[ac->avail];
				ac->entry[ac->avail] = objp;
				return objp;
			}
		}

		/*
		 * If there are empty slabs on the slabs_free list and we are
		 * being forced to refill the cache, mark this one !pfmemalloc.
		 */
758
		n = get_node(cachep, numa_mem_id());
759
		if (!list_empty(&n->slabs_free) && force_refill) {
760
			struct page *page = virt_to_head_page(objp);
761
			ClearPageSlabPfmemalloc(page);
762 763 764 765 766 767 768 769 770 771 772 773 774
			clear_obj_pfmemalloc(&objp);
			recheck_pfmemalloc_active(cachep, ac);
			return objp;
		}

		/* No !PFMEMALLOC objects available */
		ac->avail++;
		objp = NULL;
	}

	return objp;
}

775 776 777 778 779 780 781 782 783 784 785 786 787
static inline void *ac_get_obj(struct kmem_cache *cachep,
			struct array_cache *ac, gfp_t flags, bool force_refill)
{
	void *objp;

	if (unlikely(sk_memalloc_socks()))
		objp = __ac_get_obj(cachep, ac, flags, force_refill);
	else
		objp = ac->entry[--ac->avail];

	return objp;
}

J
Joonsoo Kim 已提交
788 789
static noinline void *__ac_put_obj(struct kmem_cache *cachep,
			struct array_cache *ac, void *objp)
790 791 792
{
	if (unlikely(pfmemalloc_active)) {
		/* Some pfmemalloc slabs exist, check if this is one */
793
		struct page *page = virt_to_head_page(objp);
794 795 796 797
		if (PageSlabPfmemalloc(page))
			set_obj_pfmemalloc(&objp);
	}

798 799 800 801 802 803 804 805 806
	return objp;
}

static inline void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
								void *objp)
{
	if (unlikely(sk_memalloc_socks()))
		objp = __ac_put_obj(cachep, ac, objp);

807 808 809
	ac->entry[ac->avail++] = objp;
}

810 811 812 813 814 815 816 817 818 819
/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
820
	int nr = min3(from->avail, max, to->limit - to->avail);
821 822 823 824 825 826 827 828 829 830 831 832

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail -nr,
			sizeof(void *) *nr);

	from->avail -= nr;
	to->avail += nr;
	return nr;
}

833 834 835
#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
836
#define reap_alien(cachep, n) do { } while (0)
837

J
Joonsoo Kim 已提交
838 839
static inline struct alien_cache **alloc_alien_cache(int node,
						int limit, gfp_t gfp)
840
{
841
	return (struct alien_cache **)BAD_ALIEN_MAGIC;
842 843
}

J
Joonsoo Kim 已提交
844
static inline void free_alien_cache(struct alien_cache **ac_ptr)
845 846 847 848 849 850 851 852 853 854 855 856 857 858
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

859
static inline void *____cache_alloc_node(struct kmem_cache *cachep,
860 861 862 863 864 865 866
		 gfp_t flags, int nodeid)
{
	return NULL;
}

#else	/* CONFIG_NUMA */

867
static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
868
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
869

J
Joonsoo Kim 已提交
870 871 872
static struct alien_cache *__alloc_alien_cache(int node, int entries,
						int batch, gfp_t gfp)
{
873
	size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
J
Joonsoo Kim 已提交
874 875 876 877
	struct alien_cache *alc = NULL;

	alc = kmalloc_node(memsize, gfp, node);
	init_arraycache(&alc->ac, entries, batch);
878
	spin_lock_init(&alc->lock);
J
Joonsoo Kim 已提交
879 880 881 882
	return alc;
}

static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
883
{
J
Joonsoo Kim 已提交
884
	struct alien_cache **alc_ptr;
885
	size_t memsize = sizeof(void *) * nr_node_ids;
886 887 888 889
	int i;

	if (limit > 1)
		limit = 12;
J
Joonsoo Kim 已提交
890 891 892 893 894 895 896 897 898 899 900 901 902
	alc_ptr = kzalloc_node(memsize, gfp, node);
	if (!alc_ptr)
		return NULL;

	for_each_node(i) {
		if (i == node || !node_online(i))
			continue;
		alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
		if (!alc_ptr[i]) {
			for (i--; i >= 0; i--)
				kfree(alc_ptr[i]);
			kfree(alc_ptr);
			return NULL;
903 904
		}
	}
J
Joonsoo Kim 已提交
905
	return alc_ptr;
906 907
}

J
Joonsoo Kim 已提交
908
static void free_alien_cache(struct alien_cache **alc_ptr)
909 910 911
{
	int i;

J
Joonsoo Kim 已提交
912
	if (!alc_ptr)
913 914
		return;
	for_each_node(i)
J
Joonsoo Kim 已提交
915 916
	    kfree(alc_ptr[i]);
	kfree(alc_ptr);
917 918
}

919
static void __drain_alien_cache(struct kmem_cache *cachep,
920 921
				struct array_cache *ac, int node,
				struct list_head *list)
922
{
923
	struct kmem_cache_node *n = get_node(cachep, node);
924 925

	if (ac->avail) {
926
		spin_lock(&n->list_lock);
927 928 929 930 931
		/*
		 * Stuff objects into the remote nodes shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
932 933
		if (n->shared)
			transfer_objects(n->shared, ac, ac->limit);
934

935
		free_block(cachep, ac->entry, ac->avail, node, list);
936
		ac->avail = 0;
937
		spin_unlock(&n->list_lock);
938 939 940
	}
}

941 942 943
/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
944
static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
945
{
946
	int node = __this_cpu_read(slab_reap_node);
947

948
	if (n->alien) {
J
Joonsoo Kim 已提交
949 950 951 952 953
		struct alien_cache *alc = n->alien[node];
		struct array_cache *ac;

		if (alc) {
			ac = &alc->ac;
954
			if (ac->avail && spin_trylock_irq(&alc->lock)) {
955 956 957
				LIST_HEAD(list);

				__drain_alien_cache(cachep, ac, node, &list);
958
				spin_unlock_irq(&alc->lock);
959
				slabs_destroy(cachep, &list);
J
Joonsoo Kim 已提交
960
			}
961 962 963 964
		}
	}
}

A
Andrew Morton 已提交
965
static void drain_alien_cache(struct kmem_cache *cachep,
J
Joonsoo Kim 已提交
966
				struct alien_cache **alien)
967
{
P
Pekka Enberg 已提交
968
	int i = 0;
J
Joonsoo Kim 已提交
969
	struct alien_cache *alc;
970 971 972 973
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
J
Joonsoo Kim 已提交
974 975
		alc = alien[i];
		if (alc) {
976 977
			LIST_HEAD(list);

J
Joonsoo Kim 已提交
978
			ac = &alc->ac;
979
			spin_lock_irqsave(&alc->lock, flags);
980
			__drain_alien_cache(cachep, ac, i, &list);
981
			spin_unlock_irqrestore(&alc->lock, flags);
982
			slabs_destroy(cachep, &list);
983 984 985
		}
	}
}
986

987
static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
988
{
J
Joonsoo Kim 已提交
989
	int nodeid = page_to_nid(virt_to_page(objp));
990
	struct kmem_cache_node *n;
J
Joonsoo Kim 已提交
991 992
	struct alien_cache *alien = NULL;
	struct array_cache *ac;
P
Pekka Enberg 已提交
993
	int node;
994
	LIST_HEAD(list);
P
Pekka Enberg 已提交
995

996
	node = numa_mem_id();
997 998 999 1000 1001

	/*
	 * Make sure we are not freeing a object from another node to the array
	 * cache on this cpu.
	 */
J
Joonsoo Kim 已提交
1002
	if (likely(nodeid == node))
1003 1004
		return 0;

1005
	n = get_node(cachep, node);
1006
	STATS_INC_NODEFREES(cachep);
1007 1008
	if (n->alien && n->alien[nodeid]) {
		alien = n->alien[nodeid];
J
Joonsoo Kim 已提交
1009
		ac = &alien->ac;
1010
		spin_lock(&alien->lock);
J
Joonsoo Kim 已提交
1011
		if (unlikely(ac->avail == ac->limit)) {
1012
			STATS_INC_ACOVERFLOW(cachep);
1013
			__drain_alien_cache(cachep, ac, nodeid, &list);
1014
		}
J
Joonsoo Kim 已提交
1015
		ac_put_obj(cachep, ac, objp);
1016
		spin_unlock(&alien->lock);
1017
		slabs_destroy(cachep, &list);
1018
	} else {
1019 1020
		n = get_node(cachep, nodeid);
		spin_lock(&n->list_lock);
1021
		free_block(cachep, &objp, 1, nodeid, &list);
1022
		spin_unlock(&n->list_lock);
1023
		slabs_destroy(cachep, &list);
1024 1025 1026
	}
	return 1;
}
1027 1028
#endif

1029
/*
1030
 * Allocates and initializes node for a node on each slab cache, used for
1031
 * either memory or cpu hotplug.  If memory is being hot-added, the kmem_cache_node
1032
 * will be allocated off-node since memory is not yet online for the new node.
1033
 * When hotplugging memory or a cpu, existing node are not replaced if
1034 1035
 * already in use.
 *
1036
 * Must hold slab_mutex.
1037
 */
1038
static int init_cache_node_node(int node)
1039 1040
{
	struct kmem_cache *cachep;
1041
	struct kmem_cache_node *n;
1042
	const size_t memsize = sizeof(struct kmem_cache_node);
1043

1044
	list_for_each_entry(cachep, &slab_caches, list) {
1045
		/*
1046
		 * Set up the kmem_cache_node for cpu before we can
1047 1048 1049
		 * begin anything. Make sure some other cpu on this
		 * node has not already allocated this
		 */
1050 1051
		n = get_node(cachep, node);
		if (!n) {
1052 1053
			n = kmalloc_node(memsize, GFP_KERNEL, node);
			if (!n)
1054
				return -ENOMEM;
1055
			kmem_cache_node_init(n);
1056 1057
			n->next_reap = jiffies + REAPTIMEOUT_NODE +
			    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
1058 1059

			/*
1060 1061
			 * The kmem_cache_nodes don't come and go as CPUs
			 * come and go.  slab_mutex is sufficient
1062 1063
			 * protection here.
			 */
1064
			cachep->node[node] = n;
1065 1066
		}

1067 1068
		spin_lock_irq(&n->list_lock);
		n->free_limit =
1069 1070
			(1 + nr_cpus_node(node)) *
			cachep->batchcount + cachep->num;
1071
		spin_unlock_irq(&n->list_lock);
1072 1073 1074 1075
	}
	return 0;
}

1076 1077 1078 1079 1080 1081
static inline int slabs_tofree(struct kmem_cache *cachep,
						struct kmem_cache_node *n)
{
	return (n->free_objects + cachep->num - 1) / cachep->num;
}

1082
static void cpuup_canceled(long cpu)
1083 1084
{
	struct kmem_cache *cachep;
1085
	struct kmem_cache_node *n = NULL;
1086
	int node = cpu_to_mem(cpu);
1087
	const struct cpumask *mask = cpumask_of_node(node);
1088

1089
	list_for_each_entry(cachep, &slab_caches, list) {
1090 1091
		struct array_cache *nc;
		struct array_cache *shared;
J
Joonsoo Kim 已提交
1092
		struct alien_cache **alien;
1093
		LIST_HEAD(list);
1094 1095 1096 1097

		/* cpu is dead; no one can alloc from it. */
		nc = cachep->array[cpu];
		cachep->array[cpu] = NULL;
1098
		n = get_node(cachep, node);
1099

1100
		if (!n)
1101 1102
			goto free_array_cache;

1103
		spin_lock_irq(&n->list_lock);
1104

1105 1106
		/* Free limit for this kmem_cache_node */
		n->free_limit -= cachep->batchcount;
1107
		if (nc)
1108
			free_block(cachep, nc->entry, nc->avail, node, &list);
1109

1110
		if (!cpumask_empty(mask)) {
1111
			spin_unlock_irq(&n->list_lock);
1112 1113 1114
			goto free_array_cache;
		}

1115
		shared = n->shared;
1116 1117
		if (shared) {
			free_block(cachep, shared->entry,
1118
				   shared->avail, node, &list);
1119
			n->shared = NULL;
1120 1121
		}

1122 1123
		alien = n->alien;
		n->alien = NULL;
1124

1125
		spin_unlock_irq(&n->list_lock);
1126 1127 1128 1129 1130 1131 1132

		kfree(shared);
		if (alien) {
			drain_alien_cache(cachep, alien);
			free_alien_cache(alien);
		}
free_array_cache:
1133
		slabs_destroy(cachep, &list);
1134 1135 1136 1137 1138 1139 1140
		kfree(nc);
	}
	/*
	 * In the previous loop, all the objects were freed to
	 * the respective cache's slabs,  now we can go ahead and
	 * shrink each nodelist to its limit.
	 */
1141
	list_for_each_entry(cachep, &slab_caches, list) {
1142
		n = get_node(cachep, node);
1143
		if (!n)
1144
			continue;
1145
		drain_freelist(cachep, n, slabs_tofree(cachep, n));
1146 1147 1148
	}
}

1149
static int cpuup_prepare(long cpu)
L
Linus Torvalds 已提交
1150
{
1151
	struct kmem_cache *cachep;
1152
	struct kmem_cache_node *n = NULL;
1153
	int node = cpu_to_mem(cpu);
1154
	int err;
L
Linus Torvalds 已提交
1155

1156 1157 1158 1159
	/*
	 * We need to do this right in the beginning since
	 * alloc_arraycache's are going to use this list.
	 * kmalloc_node allows us to add the slab to the right
1160
	 * kmem_cache_node and not this cpu's kmem_cache_node
1161
	 */
1162
	err = init_cache_node_node(node);
1163 1164
	if (err < 0)
		goto bad;
1165 1166 1167 1168 1169

	/*
	 * Now we can go ahead with allocating the shared arrays and
	 * array caches
	 */
1170
	list_for_each_entry(cachep, &slab_caches, list) {
1171 1172
		struct array_cache *nc;
		struct array_cache *shared = NULL;
J
Joonsoo Kim 已提交
1173
		struct alien_cache **alien = NULL;
1174 1175

		nc = alloc_arraycache(node, cachep->limit,
1176
					cachep->batchcount, GFP_KERNEL);
1177 1178 1179 1180 1181
		if (!nc)
			goto bad;
		if (cachep->shared) {
			shared = alloc_arraycache(node,
				cachep->shared * cachep->batchcount,
1182
				0xbaadf00d, GFP_KERNEL);
1183 1184
			if (!shared) {
				kfree(nc);
L
Linus Torvalds 已提交
1185
				goto bad;
1186
			}
1187 1188
		}
		if (use_alien_caches) {
1189
			alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL);
1190 1191 1192
			if (!alien) {
				kfree(shared);
				kfree(nc);
1193
				goto bad;
1194
			}
1195 1196
		}
		cachep->array[cpu] = nc;
1197
		n = get_node(cachep, node);
1198
		BUG_ON(!n);
1199

1200 1201
		spin_lock_irq(&n->list_lock);
		if (!n->shared) {
1202 1203 1204 1205
			/*
			 * We are serialised from CPU_DEAD or
			 * CPU_UP_CANCELLED by the cpucontrol lock
			 */
1206
			n->shared = shared;
1207 1208
			shared = NULL;
		}
1209
#ifdef CONFIG_NUMA
1210 1211
		if (!n->alien) {
			n->alien = alien;
1212
			alien = NULL;
L
Linus Torvalds 已提交
1213
		}
1214
#endif
1215
		spin_unlock_irq(&n->list_lock);
1216 1217 1218
		kfree(shared);
		free_alien_cache(alien);
	}
1219

1220 1221
	return 0;
bad:
1222
	cpuup_canceled(cpu);
1223 1224 1225
	return -ENOMEM;
}

1226
static int cpuup_callback(struct notifier_block *nfb,
1227 1228 1229 1230 1231 1232 1233 1234
				    unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	int err = 0;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
1235
		mutex_lock(&slab_mutex);
1236
		err = cpuup_prepare(cpu);
1237
		mutex_unlock(&slab_mutex);
L
Linus Torvalds 已提交
1238 1239
		break;
	case CPU_ONLINE:
1240
	case CPU_ONLINE_FROZEN:
L
Linus Torvalds 已提交
1241 1242 1243
		start_cpu_timer(cpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
1244
  	case CPU_DOWN_PREPARE:
1245
  	case CPU_DOWN_PREPARE_FROZEN:
1246
		/*
1247
		 * Shutdown cache reaper. Note that the slab_mutex is
1248 1249 1250 1251
		 * held so that if cache_reap() is invoked it cannot do
		 * anything expensive but will only modify reap_work
		 * and reschedule the timer.
		*/
1252
		cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
1253
		/* Now the cache_reaper is guaranteed to be not running. */
1254
		per_cpu(slab_reap_work, cpu).work.func = NULL;
1255 1256
  		break;
  	case CPU_DOWN_FAILED:
1257
  	case CPU_DOWN_FAILED_FROZEN:
1258 1259
		start_cpu_timer(cpu);
  		break;
L
Linus Torvalds 已提交
1260
	case CPU_DEAD:
1261
	case CPU_DEAD_FROZEN:
1262 1263
		/*
		 * Even if all the cpus of a node are down, we don't free the
1264
		 * kmem_cache_node of any cache. This to avoid a race between
1265
		 * cpu_down, and a kmalloc allocation from another cpu for
1266
		 * memory from the node of the cpu going down.  The node
1267 1268 1269
		 * structure is usually allocated from kmem_cache_create() and
		 * gets destroyed at kmem_cache_destroy().
		 */
S
Simon Arlott 已提交
1270
		/* fall through */
1271
#endif
L
Linus Torvalds 已提交
1272
	case CPU_UP_CANCELED:
1273
	case CPU_UP_CANCELED_FROZEN:
1274
		mutex_lock(&slab_mutex);
1275
		cpuup_canceled(cpu);
1276
		mutex_unlock(&slab_mutex);
L
Linus Torvalds 已提交
1277 1278
		break;
	}
1279
	return notifier_from_errno(err);
L
Linus Torvalds 已提交
1280 1281
}

1282
static struct notifier_block cpucache_notifier = {
1283 1284
	&cpuup_callback, NULL, 0
};
L
Linus Torvalds 已提交
1285

1286 1287 1288 1289 1290 1291
#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
/*
 * Drains freelist for a node on each slab cache, used for memory hot-remove.
 * Returns -EBUSY if all objects cannot be drained so that the node is not
 * removed.
 *
1292
 * Must hold slab_mutex.
1293
 */
1294
static int __meminit drain_cache_node_node(int node)
1295 1296 1297 1298
{
	struct kmem_cache *cachep;
	int ret = 0;

1299
	list_for_each_entry(cachep, &slab_caches, list) {
1300
		struct kmem_cache_node *n;
1301

1302
		n = get_node(cachep, node);
1303
		if (!n)
1304 1305
			continue;

1306
		drain_freelist(cachep, n, slabs_tofree(cachep, n));
1307

1308 1309
		if (!list_empty(&n->slabs_full) ||
		    !list_empty(&n->slabs_partial)) {
1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}

static int __meminit slab_memory_callback(struct notifier_block *self,
					unsigned long action, void *arg)
{
	struct memory_notify *mnb = arg;
	int ret = 0;
	int nid;

	nid = mnb->status_change_nid;
	if (nid < 0)
		goto out;

	switch (action) {
	case MEM_GOING_ONLINE:
1330
		mutex_lock(&slab_mutex);
1331
		ret = init_cache_node_node(nid);
1332
		mutex_unlock(&slab_mutex);
1333 1334
		break;
	case MEM_GOING_OFFLINE:
1335
		mutex_lock(&slab_mutex);
1336
		ret = drain_cache_node_node(nid);
1337
		mutex_unlock(&slab_mutex);
1338 1339 1340 1341 1342 1343 1344 1345
		break;
	case MEM_ONLINE:
	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}
out:
1346
	return notifier_from_errno(ret);
1347 1348 1349
}
#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */

1350
/*
1351
 * swap the static kmem_cache_node with kmalloced memory
1352
 */
1353
static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
1354
				int nodeid)
1355
{
1356
	struct kmem_cache_node *ptr;
1357

1358
	ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
1359 1360
	BUG_ON(!ptr);

1361
	memcpy(ptr, list, sizeof(struct kmem_cache_node));
1362 1363 1364 1365 1366
	/*
	 * Do not assume that spinlocks can be initialized via memcpy:
	 */
	spin_lock_init(&ptr->list_lock);

1367
	MAKE_ALL_LISTS(cachep, ptr, nodeid);
1368
	cachep->node[nodeid] = ptr;
1369 1370
}

1371
/*
1372 1373
 * For setting up all the kmem_cache_node for cache whose buffer_size is same as
 * size of kmem_cache_node.
1374
 */
1375
static void __init set_up_node(struct kmem_cache *cachep, int index)
1376 1377 1378 1379
{
	int node;

	for_each_online_node(node) {
1380
		cachep->node[node] = &init_kmem_cache_node[index + node];
1381
		cachep->node[node]->next_reap = jiffies +
1382 1383
		    REAPTIMEOUT_NODE +
		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
1384 1385 1386
	}
}

C
Christoph Lameter 已提交
1387 1388
/*
 * The memory after the last cpu cache pointer is used for the
1389
 * the node pointer.
C
Christoph Lameter 已提交
1390
 */
1391
static void setup_node_pointer(struct kmem_cache *cachep)
C
Christoph Lameter 已提交
1392
{
1393
	cachep->node = (struct kmem_cache_node **)&cachep->array[nr_cpu_ids];
C
Christoph Lameter 已提交
1394 1395
}

A
Andrew Morton 已提交
1396 1397 1398
/*
 * Initialisation.  Called after the page allocator have been initialised and
 * before smp_init().
L
Linus Torvalds 已提交
1399 1400 1401
 */
void __init kmem_cache_init(void)
{
1402 1403
	int i;

1404 1405
	BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
					sizeof(struct rcu_head));
1406
	kmem_cache = &kmem_cache_boot;
1407
	setup_node_pointer(kmem_cache);
1408

1409
	if (num_possible_nodes() == 1)
1410 1411
		use_alien_caches = 0;

C
Christoph Lameter 已提交
1412
	for (i = 0; i < NUM_INIT_LISTS; i++)
1413
		kmem_cache_node_init(&init_kmem_cache_node[i]);
C
Christoph Lameter 已提交
1414

1415
	set_up_node(kmem_cache, CACHE_CACHE);
L
Linus Torvalds 已提交
1416 1417 1418

	/*
	 * Fragmentation resistance on low memory - only use bigger
1419 1420
	 * page orders on machines with more than 32MB of memory if
	 * not overridden on the command line.
L
Linus Torvalds 已提交
1421
	 */
1422
	if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
1423
		slab_max_order = SLAB_MAX_ORDER_HI;
L
Linus Torvalds 已提交
1424 1425 1426

	/* Bootstrap is tricky, because several objects are allocated
	 * from caches that do not exist yet:
1427 1428 1429
	 * 1) initialize the kmem_cache cache: it contains the struct
	 *    kmem_cache structures of all caches, except kmem_cache itself:
	 *    kmem_cache is statically allocated.
1430
	 *    Initially an __init data area is used for the head array and the
1431
	 *    kmem_cache_node structures, it's replaced with a kmalloc allocated
1432
	 *    array at the end of the bootstrap.
L
Linus Torvalds 已提交
1433
	 * 2) Create the first kmalloc cache.
1434
	 *    The struct kmem_cache for the new cache is allocated normally.
1435 1436 1437
	 *    An __init data area is used for the head array.
	 * 3) Create the remaining kmalloc caches, with minimally sized
	 *    head arrays.
1438
	 * 4) Replace the __init data head arrays for kmem_cache and the first
L
Linus Torvalds 已提交
1439
	 *    kmalloc cache with kmalloc allocated arrays.
1440
	 * 5) Replace the __init data for kmem_cache_node for kmem_cache and
1441 1442
	 *    the other cache's with kmalloc allocated memory.
	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
L
Linus Torvalds 已提交
1443 1444
	 */

1445
	/* 1) create the kmem_cache */
L
Linus Torvalds 已提交
1446

E
Eric Dumazet 已提交
1447
	/*
1448
	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
E
Eric Dumazet 已提交
1449
	 */
1450 1451
	create_boot_cache(kmem_cache, "kmem_cache",
		offsetof(struct kmem_cache, array[nr_cpu_ids]) +
1452
				  nr_node_ids * sizeof(struct kmem_cache_node *),
1453 1454
				  SLAB_HWCACHE_ALIGN);
	list_add(&kmem_cache->list, &slab_caches);
L
Linus Torvalds 已提交
1455 1456 1457

	/* 2+3) create the kmalloc caches */

A
Andrew Morton 已提交
1458 1459
	/*
	 * Initialize the caches that provide memory for the array cache and the
1460
	 * kmem_cache_node structures first.  Without this, further allocations will
A
Andrew Morton 已提交
1461
	 * bug.
1462 1463
	 */

1464 1465
	kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
					kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
1466

1467 1468 1469 1470
	if (INDEX_AC != INDEX_NODE)
		kmalloc_caches[INDEX_NODE] =
			create_kmalloc_cache("kmalloc-node",
				kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
1471

1472 1473
	slab_early_init = 0;

L
Linus Torvalds 已提交
1474 1475
	/* 4) Replace the bootstrap head arrays */
	{
1476
		struct array_cache *ptr;
1477

1478
		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
1479

1480
		memcpy(ptr, cpu_cache_get(kmem_cache),
P
Pekka Enberg 已提交
1481
		       sizeof(struct arraycache_init));
1482

1483
		kmem_cache->array[smp_processor_id()] = ptr;
1484

1485
		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
1486

1487
		BUG_ON(cpu_cache_get(kmalloc_caches[INDEX_AC])
P
Pekka Enberg 已提交
1488
		       != &initarray_generic.cache);
1489
		memcpy(ptr, cpu_cache_get(kmalloc_caches[INDEX_AC]),
P
Pekka Enberg 已提交
1490
		       sizeof(struct arraycache_init));
1491

1492
		kmalloc_caches[INDEX_AC]->array[smp_processor_id()] = ptr;
L
Linus Torvalds 已提交
1493
	}
1494
	/* 5) Replace the bootstrap kmem_cache_node */
1495
	{
P
Pekka Enberg 已提交
1496 1497
		int nid;

1498
		for_each_online_node(nid) {
1499
			init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);
1500

1501
			init_list(kmalloc_caches[INDEX_AC],
1502
				  &init_kmem_cache_node[SIZE_AC + nid], nid);
1503

1504 1505 1506
			if (INDEX_AC != INDEX_NODE) {
				init_list(kmalloc_caches[INDEX_NODE],
					  &init_kmem_cache_node[SIZE_NODE + nid], nid);
1507 1508 1509
			}
		}
	}
L
Linus Torvalds 已提交
1510

1511
	create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
1512 1513 1514 1515 1516 1517
}

void __init kmem_cache_init_late(void)
{
	struct kmem_cache *cachep;

1518
	slab_state = UP;
P
Peter Zijlstra 已提交
1519

1520
	/* 6) resize the head arrays to their final sizes */
1521 1522
	mutex_lock(&slab_mutex);
	list_for_each_entry(cachep, &slab_caches, list)
1523 1524
		if (enable_cpucache(cachep, GFP_NOWAIT))
			BUG();
1525
	mutex_unlock(&slab_mutex);
1526

1527 1528 1529
	/* Done! */
	slab_state = FULL;

A
Andrew Morton 已提交
1530 1531 1532
	/*
	 * Register a cpu startup notifier callback that initializes
	 * cpu_cache_get for all new cpus
L
Linus Torvalds 已提交
1533 1534 1535
	 */
	register_cpu_notifier(&cpucache_notifier);

1536 1537 1538
#ifdef CONFIG_NUMA
	/*
	 * Register a memory hotplug callback that initializes and frees
1539
	 * node.
1540 1541 1542 1543
	 */
	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
#endif

A
Andrew Morton 已提交
1544 1545 1546
	/*
	 * The reap timers are started later, with a module init call: That part
	 * of the kernel is not yet operational.
L
Linus Torvalds 已提交
1547 1548 1549 1550 1551 1552 1553
	 */
}

static int __init cpucache_init(void)
{
	int cpu;

A
Andrew Morton 已提交
1554 1555
	/*
	 * Register the timers that return unneeded pages to the page allocator
L
Linus Torvalds 已提交
1556
	 */
1557
	for_each_online_cpu(cpu)
A
Andrew Morton 已提交
1558
		start_cpu_timer(cpu);
1559 1560

	/* Done! */
1561
	slab_state = FULL;
L
Linus Torvalds 已提交
1562 1563 1564 1565
	return 0;
}
__initcall(cpucache_init);

1566 1567 1568
static noinline void
slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
{
1569
#if DEBUG
1570
	struct kmem_cache_node *n;
1571
	struct page *page;
1572 1573
	unsigned long flags;
	int node;
1574 1575 1576 1577 1578
	static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs))
		return;
1579 1580 1581 1582 1583

	printk(KERN_WARNING
		"SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n",
		nodeid, gfpflags);
	printk(KERN_WARNING "  cache: %s, object size: %d, order: %d\n",
1584
		cachep->name, cachep->size, cachep->gfporder);
1585

1586
	for_each_kmem_cache_node(cachep, node, n) {
1587 1588 1589
		unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
		unsigned long active_slabs = 0, num_slabs = 0;

1590
		spin_lock_irqsave(&n->list_lock, flags);
1591
		list_for_each_entry(page, &n->slabs_full, lru) {
1592 1593 1594
			active_objs += cachep->num;
			active_slabs++;
		}
1595 1596
		list_for_each_entry(page, &n->slabs_partial, lru) {
			active_objs += page->active;
1597 1598
			active_slabs++;
		}
1599
		list_for_each_entry(page, &n->slabs_free, lru)
1600 1601
			num_slabs++;

1602 1603
		free_objects += n->free_objects;
		spin_unlock_irqrestore(&n->list_lock, flags);
1604 1605 1606 1607 1608 1609 1610 1611

		num_slabs += active_slabs;
		num_objs = num_slabs * cachep->num;
		printk(KERN_WARNING
			"  node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
			node, active_slabs, num_slabs, active_objs, num_objs,
			free_objects);
	}
1612
#endif
1613 1614
}

L
Linus Torvalds 已提交
1615
/*
W
Wang Sheng-Hui 已提交
1616 1617
 * Interface to system's page allocator. No need to hold the
 * kmem_cache_node ->list_lock.
L
Linus Torvalds 已提交
1618 1619 1620 1621 1622
 *
 * If we requested dmaable memory, we will get it. Even if we
 * did not request dmaable memory, we might get it, but that
 * would be relatively rare and ignorable.
 */
1623 1624
static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
								int nodeid)
L
Linus Torvalds 已提交
1625 1626
{
	struct page *page;
1627
	int nr_pages;
1628

1629
	flags |= cachep->allocflags;
1630 1631
	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		flags |= __GFP_RECLAIMABLE;
1632

1633 1634 1635
	if (memcg_charge_slab(cachep, flags, cachep->gfporder))
		return NULL;

L
Linus Torvalds 已提交
1636
	page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
1637
	if (!page) {
1638
		memcg_uncharge_slab(cachep, cachep->gfporder);
1639
		slab_out_of_memory(cachep, flags, nodeid);
L
Linus Torvalds 已提交
1640
		return NULL;
1641
	}
L
Linus Torvalds 已提交
1642

1643
	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
1644 1645 1646
	if (unlikely(page->pfmemalloc))
		pfmemalloc_active = true;

1647
	nr_pages = (1 << cachep->gfporder);
L
Linus Torvalds 已提交
1648
	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1649 1650 1651 1652 1653
		add_zone_page_state(page_zone(page),
			NR_SLAB_RECLAIMABLE, nr_pages);
	else
		add_zone_page_state(page_zone(page),
			NR_SLAB_UNRECLAIMABLE, nr_pages);
1654 1655 1656
	__SetPageSlab(page);
	if (page->pfmemalloc)
		SetPageSlabPfmemalloc(page);
1657

1658 1659 1660 1661 1662 1663 1664 1665
	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);

		if (cachep->ctor)
			kmemcheck_mark_uninitialized_pages(page, nr_pages);
		else
			kmemcheck_mark_unallocated_pages(page, nr_pages);
	}
P
Pekka Enberg 已提交
1666

1667
	return page;
L
Linus Torvalds 已提交
1668 1669 1670 1671 1672
}

/*
 * Interface to system's page release.
 */
1673
static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
L
Linus Torvalds 已提交
1674
{
1675
	const unsigned long nr_freed = (1 << cachep->gfporder);
L
Linus Torvalds 已提交
1676

1677
	kmemcheck_free_shadow(page, cachep->gfporder);
P
Pekka Enberg 已提交
1678

1679 1680 1681 1682 1683 1684
	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		sub_zone_page_state(page_zone(page),
				NR_SLAB_RECLAIMABLE, nr_freed);
	else
		sub_zone_page_state(page_zone(page),
				NR_SLAB_UNRECLAIMABLE, nr_freed);
J
Joonsoo Kim 已提交
1685

	BUG_ON(!PageSlab(page));
	__ClearPageSlabPfmemalloc(page);
	__ClearPageSlab(page);
	page_mapcount_reset(page);
	page->mapping = NULL;

	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += nr_freed;
	__free_pages(page, cachep->gfporder);
	memcg_uncharge_slab(cachep, cachep->gfporder);
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct kmem_cache *cachep;
	struct page *page;

	page = container_of(head, struct page, rcu_head);
	cachep = page->slab_cache;

	kmem_freepages(cachep, page);
}

#if DEBUG

#ifdef CONFIG_DEBUG_PAGEALLOC
static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
			    unsigned long caller)
{
	int size = cachep->object_size;

	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];

	if (size < 5 * sizeof(unsigned long))
		return;

	*addr++ = 0x12345678;
	*addr++ = caller;
	*addr++ = smp_processor_id();
	size -= 3 * sizeof(unsigned long);
	{
		unsigned long *sptr = &caller;
		unsigned long svalue;

		while (!kstack_end(sptr)) {
			svalue = *sptr++;
			if (kernel_text_address(svalue)) {
				*addr++ = svalue;
				size -= sizeof(unsigned long);
				if (size <= sizeof(unsigned long))
					break;
			}
		}

	}
	*addr++ = 0x87654321;
}
#endif

static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
{
	int size = cachep->object_size;
	addr = &((char *)addr)[obj_offset(cachep)];

	memset(addr, val, size);
	*(unsigned char *)(addr + size - 1) = POISON_END;
}

static void dump_line(char *data, int offset, int limit)
{
	int i;
	unsigned char error = 0;
	int bad_count = 0;

	printk(KERN_ERR "%03x: ", offset);
	for (i = 0; i < limit; i++) {
		if (data[offset + i] != POISON_FREE) {
			error = data[offset + i];
			bad_count++;
		}
	}
	print_hex_dump(KERN_CONT, "", 0, 16, 1,
			&data[offset], limit, 1);

	if (bad_count == 1) {
		error ^= POISON_FREE;
		if (!(error & (error - 1))) {
			printk(KERN_ERR "Single bit error detected. Probably "
					"bad RAM.\n");
#ifdef CONFIG_X86
			printk(KERN_ERR "Run memtest86+ or a similar memory "
					"test tool.\n");
#else
			printk(KERN_ERR "Run a memory test tool.\n");
#endif
		}
	}
}
#endif

#if DEBUG

static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
{
	int i, size;
	char *realobj;

	if (cachep->flags & SLAB_RED_ZONE) {
		printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
			*dbg_redzone1(cachep, objp),
			*dbg_redzone2(cachep, objp));
	}

	if (cachep->flags & SLAB_STORE_USER) {
		printk(KERN_ERR "Last user: [<%p>](%pSR)\n",
		       *dbg_userword(cachep, objp),
		       *dbg_userword(cachep, objp));
	}
	realobj = (char *)objp + obj_offset(cachep);
	size = cachep->object_size;
	for (i = 0; i < size && lines; i += 16, lines--) {
		int limit;
		limit = 16;
		if (i + limit > size)
			limit = size - i;
		dump_line(realobj, i, limit);
	}
}

static void check_poison_obj(struct kmem_cache *cachep, void *objp)
{
	char *realobj;
	int size, i;
	int lines = 0;

	realobj = (char *)objp + obj_offset(cachep);
	size = cachep->object_size;

	for (i = 0; i < size; i++) {
		char exp = POISON_FREE;
		if (i == size - 1)
			exp = POISON_END;
		if (realobj[i] != exp) {
			int limit;
			/* Mismatch ! */
			/* Print header */
			if (lines == 0) {
				printk(KERN_ERR
					"Slab corruption (%s): %s start=%p, len=%d\n",
					print_tainted(), cachep->name, realobj, size);
				print_objinfo(cachep, objp, 0);
			}
			/* Hexdump the affected line */
			i = (i / 16) * 16;
			limit = 16;
			if (i + limit > size)
				limit = size - i;
			dump_line(realobj, i, limit);
			i += 16;
			lines++;
			/* Limit to 5 lines */
			if (lines > 5)
				break;
		}
	}
	if (lines != 0) {
		/* Print some data about the neighboring objects, if they
		 * exist:
		 */
		struct page *page = virt_to_head_page(objp);
		unsigned int objnr;

		objnr = obj_to_index(cachep, page, objp);
		if (objnr) {
			objp = index_to_obj(cachep, page, objnr - 1);
			realobj = (char *)objp + obj_offset(cachep);
			printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
			       realobj, size);
			print_objinfo(cachep, objp, 2);
		}
		if (objnr + 1 < cachep->num) {
			objp = index_to_obj(cachep, page, objnr + 1);
			realobj = (char *)objp + obj_offset(cachep);
			printk(KERN_ERR "Next obj: start=%p, len=%d\n",
			       realobj, size);
			print_objinfo(cachep, objp, 2);
		}
	}
}
#endif

#if DEBUG
static void slab_destroy_debugcheck(struct kmem_cache *cachep,
						struct page *page)
{
	int i;
	for (i = 0; i < cachep->num; i++) {
		void *objp = index_to_obj(cachep, page, i);

		if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
			if (cachep->size % PAGE_SIZE == 0 &&
					OFF_SLAB(cachep))
				kernel_map_pages(virt_to_page(objp),
					cachep->size / PAGE_SIZE, 1);
			else
				check_poison_obj(cachep, objp);
#else
			check_poison_obj(cachep, objp);
#endif
		}
		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "start of a freed object "
					   "was overwritten");
			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "end of a freed object "
					   "was overwritten");
		}
	}
}
#else
static void slab_destroy_debugcheck(struct kmem_cache *cachep,
						struct page *page)
{
}
#endif

/**
 * slab_destroy - destroy and release all objects in a slab
 * @cachep: cache pointer being destroyed
 * @page: page pointer being destroyed
 *
 * Destroy all the objs in a slab page, and release the mem back to the system.
 * Before calling, the slab page must have been unlinked from the cache. The
 * kmem_cache_node ->list_lock is not held/needed.
 */
static void slab_destroy(struct kmem_cache *cachep, struct page *page)
{
	void *freelist;

	freelist = page->freelist;
	slab_destroy_debugcheck(cachep, page);
	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
		struct rcu_head *head;

		/*
		 * RCU free overloads the RCU head over the LRU.
		 * slab_page has been overloaded over the LRU,
		 * however it is not used from now on so that
		 * we can use it safely.
		 */
		head = (void *)&page->rcu_head;
		call_rcu(head, kmem_rcu_free);

	} else {
		kmem_freepages(cachep, page);
	}

	/*
	 * From now on, we don't use freelist
	 * although actual page can be freed in rcu context
	 */
	if (OFF_SLAB(cachep))
		kmem_cache_free(cachep->freelist_cache, freelist);
}

static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
{
	struct page *page, *n;

	list_for_each_entry_safe(page, n, list, lru) {
		list_del(&page->lru);
		slab_destroy(cachep, page);
	}
}

/**
 * calculate_slab_order - calculate size (page order) of slabs
 * @cachep: pointer to the cache that is being created
 * @size: size of objects to be created in this cache.
 * @align: required alignment for the objects.
 * @flags: slab allocation flags
 *
 * Also calculates the number of objects per slab.
 *
 * This could be made much more intelligent.  For now, try to avoid using
 * high order pages for slabs.  When the gfp() functions are more friendly
 * towards high-order requests, this should be changed.
 */
static size_t calculate_slab_order(struct kmem_cache *cachep,
			size_t size, size_t align, unsigned long flags)
{
	unsigned long offslab_limit;
	size_t left_over = 0;
	int gfporder;

	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
		unsigned int num;
		size_t remainder;

		cache_estimate(gfporder, size, align, flags, &remainder, &num);
		if (!num)
			continue;

		/* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */
		if (num > SLAB_OBJ_MAX_NUM)
			break;

		if (flags & CFLGS_OFF_SLAB) {
			size_t freelist_size_per_obj = sizeof(freelist_idx_t);
			/*
			 * Max number of objs-per-slab for caches which
			 * use off-slab slabs. Needed to avoid a possible
			 * looping condition in cache_grow().
			 */
			if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
				freelist_size_per_obj += sizeof(char);
			offslab_limit = size;
			offslab_limit /= freelist_size_per_obj;

			if (num > offslab_limit)
				break;
		}

		/* Found something acceptable - save it away */
		cachep->num = num;
		cachep->gfporder = gfporder;
		left_over = remainder;

		/*
		 * A VFS-reclaimable slab tends to have most allocations
		 * as GFP_NOFS and we really don't want to have to be allocating
		 * higher-order pages when we are unable to shrink dcache.
		 */
		if (flags & SLAB_RECLAIM_ACCOUNT)
			break;

		/*
		 * Large number of objects is good, but very large slabs are
		 * currently bad for the gfp()s.
		 */
		if (gfporder >= slab_max_order)
			break;

		/*
		 * Acceptable internal fragmentation?
		 */
		if (left_over * 8 <= (PAGE_SIZE << gfporder))
			break;
	}
	return left_over;
}
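/*
 * Worked example (illustrative only, assuming 4 KiB pages, on-slab
 * management and a byte-sized freelist index): for a hypothetical
 * 1000-byte object, gfporder 0 packs num = 4 objects plus a 4-byte
 * freelist into one page, leaving roughly 90 bytes of left_over.
 * Since left_over * 8 <= PAGE_SIZE, the loop accepts order 0 and
 * returns that remainder, which the cache setup code later turns
 * into colour offsets.
 */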

2040
static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
2041
{
2042
	if (slab_state >= FULL)
2043
		return enable_cpucache(cachep, gfp);
2044

2045
	if (slab_state == DOWN) {
2046
		/*
2047
		 * Note: Creation of first cache (kmem_cache).
2048
		 * The setup_node is taken care
2049 2050 2051 2052 2053 2054 2055
		 * of by the caller of __kmem_cache_create
		 */
		cachep->array[smp_processor_id()] = &initarray_generic.cache;
		slab_state = PARTIAL;
	} else if (slab_state == PARTIAL) {
		/*
		 * Note: the second kmem_cache_create must create the cache
2056 2057 2058 2059 2060 2061
		 * that's used by kmalloc(24), otherwise the creation of
		 * further caches will BUG().
		 */
		cachep->array[smp_processor_id()] = &initarray_generic.cache;

		/*
		 * If the cache that's used by kmalloc(sizeof(kmem_cache_node)) is
		 * the second cache, then we need to set up all its
		 * kmem_cache_node structures, otherwise the creation of further
		 * caches will BUG().
		 */
2066 2067 2068
		set_up_node(cachep, SIZE_AC);
		if (INDEX_AC == INDEX_NODE)
			slab_state = PARTIAL_NODE;
2069
		else
2070
			slab_state = PARTIAL_ARRAYCACHE;
2071
	} else {
2072
		/* Remaining boot caches */
2073
		cachep->array[smp_processor_id()] =
2074
			kmalloc(sizeof(struct arraycache_init), gfp);
2075

2076
		if (slab_state == PARTIAL_ARRAYCACHE) {
2077 2078
			set_up_node(cachep, SIZE_NODE);
			slab_state = PARTIAL_NODE;
2079 2080
		} else {
			int node;
2081
			for_each_online_node(node) {
2082
				cachep->node[node] =
2083
				    kmalloc_node(sizeof(struct kmem_cache_node),
2084
						gfp, node);
2085
				BUG_ON(!cachep->node[node]);
2086
				kmem_cache_node_init(cachep->node[node]);
2087 2088 2089
			}
		}
	}
2090
	cachep->node[numa_mem_id()]->next_reap =
2091 2092
			jiffies + REAPTIMEOUT_NODE +
			((unsigned long)cachep) % REAPTIMEOUT_NODE;
2093 2094 2095 2096 2097 2098 2099

	cpu_cache_get(cachep)->avail = 0;
	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
	cpu_cache_get(cachep)->batchcount = 1;
	cpu_cache_get(cachep)->touched = 0;
	cachep->batchcount = 1;
	cachep->limit = BOOT_CPUCACHE_ENTRIES;
2100
	return 0;
2101 2102
}

L
Linus Torvalds 已提交
2103
/**
2104
 * __kmem_cache_create - Create a cache.
R
Randy Dunlap 已提交
2105
 * @cachep: cache management descriptor
L
Linus Torvalds 已提交
2106 2107 2108 2109
 * @flags: SLAB flags
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within a int, but can be interrupted.
2110
 * The @ctor is run when new pages are allocated by the cache.
L
Linus Torvalds 已提交
2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 */
2124
int
2125
__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
L
Linus Torvalds 已提交
2126
{
2127 2128
	size_t left_over, freelist_size;
	size_t ralign = BYTES_PER_WORD;
2129
	gfp_t gfp;
2130
	int err;
2131
	size_t size = cachep->size;
L
Linus Torvalds 已提交
2132 2133 2134 2135 2136 2137 2138 2139 2140

#if DEBUG
#if FORCED_DEBUG
	/*
	 * Enable redzoning and last user accounting, except for caches with
	 * large objects, if the increased size would increase the object size
	 * above the next power of two: caches with object sizes just above a
	 * power of two have a significant amount of internal fragmentation.
	 */
D
David Woodhouse 已提交
2141 2142
	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
						2 * sizeof(unsigned long long)))
P
Pekka Enberg 已提交
2143
		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
L
Linus Torvalds 已提交
2144 2145 2146 2147 2148 2149 2150
	if (!(flags & SLAB_DESTROY_BY_RCU))
		flags |= SLAB_POISON;
#endif
	if (flags & SLAB_DESTROY_BY_RCU)
		BUG_ON(flags & SLAB_POISON);
#endif

A
Andrew Morton 已提交
2151 2152
	/*
	 * Check that size is in terms of words.  This is needed to avoid
L
Linus Torvalds 已提交
2153 2154 2155
	 * unaligned accesses for some archs when redzoning is used, and makes
	 * sure any on-slab bufctl's are also correctly aligned.
	 */
P
Pekka Enberg 已提交
2156 2157 2158
	if (size & (BYTES_PER_WORD - 1)) {
		size += (BYTES_PER_WORD - 1);
		size &= ~(BYTES_PER_WORD - 1);
L
Linus Torvalds 已提交
2159 2160
	}

D
David Woodhouse 已提交
2161 2162 2163 2164 2165 2166 2167
	if (flags & SLAB_RED_ZONE) {
		ralign = REDZONE_ALIGN;
		/* If redzoning, ensure that the second redzone is suitably
		 * aligned, by adjusting the object size accordingly. */
		size += REDZONE_ALIGN - 1;
		size &= ~(REDZONE_ALIGN - 1);
	}
2168

2169
	/* 3) caller mandated alignment */
2170 2171
	if (ralign < cachep->align) {
		ralign = cachep->align;
L
Linus Torvalds 已提交
2172
	}
2173 2174
	/* disable debug if necessary */
	if (ralign > __alignof__(unsigned long long))
2175
		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
A
Andrew Morton 已提交
2176
	/*
2177
	 * 4) Store it.
L
Linus Torvalds 已提交
2178
	 */
2179
	cachep->align = ralign;
L
Linus Torvalds 已提交
2180

2181 2182 2183 2184 2185
	if (slab_is_available())
		gfp = GFP_KERNEL;
	else
		gfp = GFP_NOWAIT;

2186
	setup_node_pointer(cachep);
L
Linus Torvalds 已提交
2187 2188
#if DEBUG

2189 2190 2191 2192
	/*
	 * Both debugging options require word-alignment which is calculated
	 * into align above.
	 */
L
Linus Torvalds 已提交
2193 2194
	if (flags & SLAB_RED_ZONE) {
		/* add space for red zone words */
2195 2196
		cachep->obj_offset += sizeof(unsigned long long);
		size += 2 * sizeof(unsigned long long);
L
Linus Torvalds 已提交
2197 2198
	}
	if (flags & SLAB_STORE_USER) {
2199
		/* user store requires one word storage behind the end of
D
David Woodhouse 已提交
2200 2201
		 * the real object. But if the second red zone needs to be
		 * aligned to 64 bits, we must allow that much space.
L
Linus Torvalds 已提交
2202
		 */
D
David Woodhouse 已提交
2203 2204 2205 2206
		if (flags & SLAB_RED_ZONE)
			size += REDZONE_ALIGN;
		else
			size += BYTES_PER_WORD;
L
Linus Torvalds 已提交
2207 2208
	}
#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
2209
	if (size >= kmalloc_size(INDEX_NODE + 1)
2210 2211 2212
	    && cachep->object_size > cache_line_size()
	    && ALIGN(size, cachep->align) < PAGE_SIZE) {
		cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
L
Linus Torvalds 已提交
2213 2214 2215 2216 2217
		size = PAGE_SIZE;
	}
#endif
#endif

2218 2219 2220
	/*
	 * Determine if the slab management is 'on' or 'off' slab.
	 * (bootstrapping cannot cope with offslab caches so don't do
2221 2222
	 * it too early on. Always use on-slab management when
	 * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
2223
	 */
2224
	if ((size >= (PAGE_SIZE >> 5)) && !slab_early_init &&
2225
	    !(flags & SLAB_NOLEAKTRACE))
L
Linus Torvalds 已提交
2226 2227 2228 2229 2230 2231
		/*
		 * Size is large, assume best to place the slab management obj
		 * off-slab (should allow better packing of objs).
		 */
		flags |= CFLGS_OFF_SLAB;

2232
	size = ALIGN(size, cachep->align);
2233 2234 2235 2236 2237 2238
	/*
	 * We should restrict the number of objects in a slab to implement
	 * byte sized index. Refer comment on SLAB_OBJ_MIN_SIZE definition.
	 */
	if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
		size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
L
Linus Torvalds 已提交
2239

2240
	left_over = calculate_slab_order(cachep, size, cachep->align, flags);
L
Linus Torvalds 已提交
2241

2242
	if (!cachep->num)
2243
		return -E2BIG;
L
Linus Torvalds 已提交
2244

2245
	freelist_size = calculate_freelist_size(cachep->num, cachep->align);
L
Linus Torvalds 已提交
2246 2247 2248 2249 2250

	/*
	 * If the slab has been placed off-slab, and we have enough space then
	 * move it on-slab. This is at the expense of any extra colouring.
	 */
2251
	if (flags & CFLGS_OFF_SLAB && left_over >= freelist_size) {
L
Linus Torvalds 已提交
2252
		flags &= ~CFLGS_OFF_SLAB;
2253
		left_over -= freelist_size;
L
Linus Torvalds 已提交
2254 2255 2256 2257
	}

	if (flags & CFLGS_OFF_SLAB) {
		/* really off slab. No need for manual alignment */
2258
		freelist_size = calculate_freelist_size(cachep->num, 0);
2259 2260 2261 2262 2263 2264 2265 2266 2267

#ifdef CONFIG_PAGE_POISONING
		/* If we're going to use the generic kernel_map_pages()
		 * poisoning, then it's going to smash the contents of
		 * the redzone and userword anyhow, so switch them off.
		 */
		if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
			flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
#endif
L
Linus Torvalds 已提交
2268 2269 2270 2271
	}

	cachep->colour_off = cache_line_size();
	/* Offset must be a multiple of the alignment. */
2272 2273
	if (cachep->colour_off < cachep->align)
		cachep->colour_off = cachep->align;
P
Pekka Enberg 已提交
2274
	cachep->colour = left_over / cachep->colour_off;
2275
	cachep->freelist_size = freelist_size;
L
Linus Torvalds 已提交
2276
	cachep->flags = flags;
2277
	cachep->allocflags = __GFP_COMP;
2278
	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
2279
		cachep->allocflags |= GFP_DMA;
2280
	cachep->size = size;
2281
	cachep->reciprocal_buffer_size = reciprocal_value(size);
L
Linus Torvalds 已提交
2282

2283
	if (flags & CFLGS_OFF_SLAB) {
2284
		cachep->freelist_cache = kmalloc_slab(freelist_size, 0u);
2285
		/*
2286
		 * This is a possibility for one of the kmalloc_{dma,}_caches.
2287
		 * But since we go off slab only for object size greater than
2288 2289
		 * PAGE_SIZE/8, and kmalloc_{dma,}_caches get created
		 * in ascending order,this should not happen at all.
2290 2291
		 * But leave a BUG_ON for some lucky dude.
		 */
2292
		BUG_ON(ZERO_OR_NULL_PTR(cachep->freelist_cache));
2293
	}
L
Linus Torvalds 已提交
2294

2295 2296
	err = setup_cpu_cache(cachep, gfp);
	if (err) {
2297
		__kmem_cache_shutdown(cachep);
2298
		return err;
2299
	}
L
Linus Torvalds 已提交
2300

2301
	return 0;
L
Linus Torvalds 已提交
2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314
}

#if DEBUG
static void check_irq_off(void)
{
	BUG_ON(!irqs_disabled());
}

static void check_irq_on(void)
{
	BUG_ON(irqs_disabled());
}

2315
static void check_spinlock_acquired(struct kmem_cache *cachep)
L
Linus Torvalds 已提交
2316 2317 2318
{
#ifdef CONFIG_SMP
	check_irq_off();
2319
	assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
L
Linus Torvalds 已提交
2320 2321
#endif
}
2322

2323
static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2324 2325 2326
{
#ifdef CONFIG_SMP
	check_irq_off();
2327
	assert_spin_locked(&get_node(cachep, node)->list_lock);
2328 2329 2330
#endif
}

L
Linus Torvalds 已提交
2331 2332 2333 2334
#else
#define check_irq_off()	do { } while(0)
#define check_irq_on()	do { } while(0)
#define check_spinlock_acquired(x) do { } while(0)
2335
#define check_spinlock_acquired_node(x, y) do { } while(0)
L
Linus Torvalds 已提交
2336 2337
#endif

2338
static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
2339 2340 2341
			struct array_cache *ac,
			int force, int node);

L
Linus Torvalds 已提交
2342 2343
static void do_drain(void *arg)
{
A
Andrew Morton 已提交
2344
	struct kmem_cache *cachep = arg;
L
Linus Torvalds 已提交
2345
	struct array_cache *ac;
2346
	int node = numa_mem_id();
2347
	struct kmem_cache_node *n;
2348
	LIST_HEAD(list);
L
Linus Torvalds 已提交
2349 2350

	check_irq_off();
2351
	ac = cpu_cache_get(cachep);
2352 2353
	n = get_node(cachep, node);
	spin_lock(&n->list_lock);
2354
	free_block(cachep, ac->entry, ac->avail, node, &list);
2355
	spin_unlock(&n->list_lock);
2356
	slabs_destroy(cachep, &list);
L
Linus Torvalds 已提交
2357 2358 2359
	ac->avail = 0;
}

2360
static void drain_cpu_caches(struct kmem_cache *cachep)
L
Linus Torvalds 已提交
2361
{
2362
	struct kmem_cache_node *n;
2363 2364
	int node;

2365
	on_each_cpu(do_drain, cachep, 1);
L
Linus Torvalds 已提交
2366
	check_irq_on();
2367 2368
	for_each_kmem_cache_node(cachep, node, n)
		if (n->alien)
2369
			drain_alien_cache(cachep, n->alien);
2370

2371 2372
	for_each_kmem_cache_node(cachep, node, n)
		drain_array(cachep, n, n->shared, 1, node);
L
Linus Torvalds 已提交
2373 2374
}

2375 2376 2377 2378 2379 2380 2381
/*
 * Remove slabs from the list of free slabs.
 * Specify the number of slabs to drain in tofree.
 *
 * Returns the actual number of slabs released.
 */
static int drain_freelist(struct kmem_cache *cache,
2382
			struct kmem_cache_node *n, int tofree)
L
Linus Torvalds 已提交
2383
{
2384 2385
	struct list_head *p;
	int nr_freed;
2386
	struct page *page;
L
Linus Torvalds 已提交
2387

2388
	nr_freed = 0;
2389
	while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
L
Linus Torvalds 已提交
2390

2391 2392 2393 2394
		spin_lock_irq(&n->list_lock);
		p = n->slabs_free.prev;
		if (p == &n->slabs_free) {
			spin_unlock_irq(&n->list_lock);
2395 2396
			goto out;
		}
L
Linus Torvalds 已提交
2397

2398
		page = list_entry(p, struct page, lru);
L
Linus Torvalds 已提交
2399
#if DEBUG
2400
		BUG_ON(page->active);
L
Linus Torvalds 已提交
2401
#endif
2402
		list_del(&page->lru);
2403 2404 2405 2406
		/*
		 * Safe to drop the lock. The slab is no longer linked
		 * to the cache.
		 */
2407 2408
		n->free_objects -= cache->num;
		spin_unlock_irq(&n->list_lock);
2409
		slab_destroy(cache, page);
2410
		nr_freed++;
L
Linus Torvalds 已提交
2411
	}
2412 2413
out:
	return nr_freed;
L
Linus Torvalds 已提交
2414 2415
}

2416
int __kmem_cache_shrink(struct kmem_cache *cachep)
2417
{
2418 2419
	int ret = 0;
	int node;
2420
	struct kmem_cache_node *n;
2421 2422 2423 2424

	drain_cpu_caches(cachep);

	check_irq_on();
2425
	for_each_kmem_cache_node(cachep, node, n) {
2426
		drain_freelist(cachep, n, slabs_tofree(cachep, n));
2427

2428 2429
		ret += !list_empty(&n->slabs_full) ||
			!list_empty(&n->slabs_partial);
2430 2431 2432 2433
	}
	return (ret ? 1 : 0);
}

2434
int __kmem_cache_shutdown(struct kmem_cache *cachep)
L
Linus Torvalds 已提交
2435
{
2436
	int i;
2437
	struct kmem_cache_node *n;
2438
	int rc = __kmem_cache_shrink(cachep);
L
Linus Torvalds 已提交
2439

2440 2441
	if (rc)
		return rc;
L
Linus Torvalds 已提交
2442

2443 2444
	for_each_online_cpu(i)
	    kfree(cachep->array[i]);
L
Linus Torvalds 已提交
2445

2446
	/* NUMA: free the node structures */
2447 2448 2449 2450 2451
	for_each_kmem_cache_node(cachep, i, n) {
		kfree(n->shared);
		free_alien_cache(n->alien);
		kfree(n);
		cachep->node[i] = NULL;
2452 2453
	}
	return 0;
L
Linus Torvalds 已提交
2454 2455
}

2456 2457
/*
 * Get the memory for a slab management obj.
2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468
 *
 * For a slab cache when the slab descriptor is off-slab, the
 * slab descriptor can't come from the same cache which is being created,
 * because if it were the case, that would mean we defer the creation of
 * the kmalloc_{dma,}_cache of size sizeof(slab descriptor) to this point.
 * And we eventually call down to __kmem_cache_create(), which
 * in turn looks up in the kmalloc_{dma,}_caches for the desired-size one.
 * This is a "chicken-and-egg" problem.
 *
 * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
 * which are all initialized during kmem_cache_init().
2469
 */
2470
static void *alloc_slabmgmt(struct kmem_cache *cachep,
2471 2472
				   struct page *page, int colour_off,
				   gfp_t local_flags, int nodeid)
L
Linus Torvalds 已提交
2473
{
2474
	void *freelist;
2475
	void *addr = page_address(page);
P
Pekka Enberg 已提交
2476

L
Linus Torvalds 已提交
2477 2478
	if (OFF_SLAB(cachep)) {
		/* Slab management obj is off-slab. */
2479
		freelist = kmem_cache_alloc_node(cachep->freelist_cache,
2480
					      local_flags, nodeid);
2481
		if (!freelist)
L
Linus Torvalds 已提交
2482 2483
			return NULL;
	} else {
2484 2485
		freelist = addr + colour_off;
		colour_off += cachep->freelist_size;
L
Linus Torvalds 已提交
2486
	}
2487 2488 2489
	page->active = 0;
	page->s_mem = addr + colour_off;
	return freelist;
L
Linus Torvalds 已提交
2490 2491
}

2492
static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
L
Linus Torvalds 已提交
2493
{
2494
	return ((freelist_idx_t *)page->freelist)[idx];
2495 2496 2497
}

static inline void set_free_obj(struct page *page,
2498
					unsigned int idx, freelist_idx_t val)
2499
{
2500
	((freelist_idx_t *)(page->freelist))[idx] = val;
L
Linus Torvalds 已提交
2501 2502
}

2503
static void cache_init_objs(struct kmem_cache *cachep,
2504
			    struct page *page)
L
Linus Torvalds 已提交
2505 2506 2507 2508
{
	int i;

	for (i = 0; i < cachep->num; i++) {
2509
		void *objp = index_to_obj(cachep, page, i);
L
Linus Torvalds 已提交
2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521
#if DEBUG
		/* need to poison the objs? */
		if (cachep->flags & SLAB_POISON)
			poison_obj(cachep, objp, POISON_FREE);
		if (cachep->flags & SLAB_STORE_USER)
			*dbg_userword(cachep, objp) = NULL;

		if (cachep->flags & SLAB_RED_ZONE) {
			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
		}
		/*
A
Andrew Morton 已提交
2522 2523 2524
		 * Constructors are not allowed to allocate memory from the same
		 * cache which they are a constructor for.  Otherwise, deadlock.
		 * They must also be threaded.
L
Linus Torvalds 已提交
2525 2526
		 */
		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
2527
			cachep->ctor(objp + obj_offset(cachep));
L
Linus Torvalds 已提交
2528 2529 2530 2531

		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "constructor overwrote the"
P
Pekka Enberg 已提交
2532
					   " end of an object");
L
Linus Torvalds 已提交
2533 2534
			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "constructor overwrote the"
P
Pekka Enberg 已提交
2535
					   " start of an object");
L
Linus Torvalds 已提交
2536
		}
2537
		if ((cachep->size % PAGE_SIZE) == 0 &&
A
Andrew Morton 已提交
2538
			    OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
P
Pekka Enberg 已提交
2539
			kernel_map_pages(virt_to_page(objp),
2540
					 cachep->size / PAGE_SIZE, 0);
L
Linus Torvalds 已提交
2541 2542
#else
		if (cachep->ctor)
2543
			cachep->ctor(objp);
L
Linus Torvalds 已提交
2544
#endif
2545
		set_obj_status(page, i, OBJECT_FREE);
2546
		set_free_obj(page, i, i);
L
Linus Torvalds 已提交
2547 2548 2549
	}
}

2550
static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
L
Linus Torvalds 已提交
2551
{
2552 2553
	if (CONFIG_ZONE_DMA_FLAG) {
		if (flags & GFP_DMA)
2554
			BUG_ON(!(cachep->allocflags & GFP_DMA));
2555
		else
2556
			BUG_ON(cachep->allocflags & GFP_DMA);
2557
	}
L
Linus Torvalds 已提交
2558 2559
}

2560
static void *slab_get_obj(struct kmem_cache *cachep, struct page *page,
A
Andrew Morton 已提交
2561
				int nodeid)
2562
{
2563
	void *objp;
2564

2565
	objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
2566
	page->active++;
2567
#if DEBUG
J
Joonsoo Kim 已提交
2568
	WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
2569 2570 2571 2572 2573
#endif

	return objp;
}

2574
static void slab_put_obj(struct kmem_cache *cachep, struct page *page,
A
Andrew Morton 已提交
2575
				void *objp, int nodeid)
2576
{
2577
	unsigned int objnr = obj_to_index(cachep, page, objp);
2578
#if DEBUG
J
Joonsoo Kim 已提交
2579
	unsigned int i;
2580

2581
	/* Verify that the slab belongs to the intended node */
J
Joonsoo Kim 已提交
2582
	WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
2583

2584
	/* Verify double free bug */
2585
	for (i = page->active; i < cachep->num; i++) {
2586
		if (get_free_obj(page, i) == objnr) {
2587 2588 2589 2590
			printk(KERN_ERR "slab: double free detected in cache "
					"'%s', objp %p\n", cachep->name, objp);
			BUG();
		}
2591 2592
	}
#endif
2593
	page->active--;
2594
	set_free_obj(page, page->active, objnr);
2595 2596
}

2597 2598 2599
/*
 * Map pages beginning at addr to the given cache and slab. This is required
 * for the slab allocator to be able to lookup the cache and slab of a
2600
 * virtual address for kfree, ksize, and slab debugging.
2601
 */
2602
static void slab_map_pages(struct kmem_cache *cache, struct page *page,
2603
			   void *freelist)
L
Linus Torvalds 已提交
2604
{
2605
	page->slab_cache = cache;
2606
	page->freelist = freelist;
L
Linus Torvalds 已提交
2607 2608 2609 2610 2611 2612
}

/*
 * Grow (by 1) the number of slabs within a cache.  This is called by
 * kmem_cache_alloc() when there are no active objs left in a cache.
 */
2613
static int cache_grow(struct kmem_cache *cachep,
2614
		gfp_t flags, int nodeid, struct page *page)
L
Linus Torvalds 已提交
2615
{
2616
	void *freelist;
P
Pekka Enberg 已提交
2617 2618
	size_t offset;
	gfp_t local_flags;
2619
	struct kmem_cache_node *n;
L
Linus Torvalds 已提交
2620

A
Andrew Morton 已提交
2621 2622 2623
	/*
	 * Be lazy and only check for valid flags here,  keeping it out of the
	 * critical path in kmem_cache_alloc().
L
Linus Torvalds 已提交
2624
	 */
C
Christoph Lameter 已提交
2625 2626
	BUG_ON(flags & GFP_SLAB_BUG_MASK);
	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
L
Linus Torvalds 已提交
2627

2628
	/* Take the node list lock to change the colour_next on this node */
L
Linus Torvalds 已提交
2629
	check_irq_off();
2630
	n = get_node(cachep, nodeid);
2631
	spin_lock(&n->list_lock);
L
Linus Torvalds 已提交
2632 2633

	/* Get colour for the slab, and cal the next value. */
2634 2635 2636 2637 2638
	offset = n->colour_next;
	n->colour_next++;
	if (n->colour_next >= cachep->colour)
		n->colour_next = 0;
	spin_unlock(&n->list_lock);
L
Linus Torvalds 已提交
2639

2640
	offset *= cachep->colour_off;
L
Linus Torvalds 已提交
2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652

	if (local_flags & __GFP_WAIT)
		local_irq_enable();

	/*
	 * The test for missing atomic flag is performed here, rather than
	 * the more obvious place, simply to reduce the critical path length
	 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
	 * will eventually be caught here (where it matters).
	 */
	kmem_flagcheck(cachep, flags);

A
Andrew Morton 已提交
2653 2654 2655
	/*
	 * Get mem for the objs.  Attempt to allocate a physical page from
	 * 'nodeid'.
2656
	 */
2657 2658 2659
	if (!page)
		page = kmem_getpages(cachep, local_flags, nodeid);
	if (!page)
L
Linus Torvalds 已提交
2660 2661 2662
		goto failed;

	/* Get slab management. */
2663
	freelist = alloc_slabmgmt(cachep, page, offset,
C
Christoph Lameter 已提交
2664
			local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
2665
	if (!freelist)
L
Linus Torvalds 已提交
2666 2667
		goto opps1;

2668
	slab_map_pages(cachep, page, freelist);
L
Linus Torvalds 已提交
2669

2670
	cache_init_objs(cachep, page);
L
Linus Torvalds 已提交
2671 2672 2673 2674

	if (local_flags & __GFP_WAIT)
		local_irq_disable();
	check_irq_off();
2675
	spin_lock(&n->list_lock);
L
Linus Torvalds 已提交
2676 2677

	/* Make slab active. */
2678
	list_add_tail(&page->lru, &(n->slabs_free));
L
Linus Torvalds 已提交
2679
	STATS_INC_GROWN(cachep);
2680 2681
	n->free_objects += cachep->num;
	spin_unlock(&n->list_lock);
L
Linus Torvalds 已提交
2682
	return 1;
A
Andrew Morton 已提交
2683
opps1:
2684
	kmem_freepages(cachep, page);
A
Andrew Morton 已提交
2685
failed:
L
Linus Torvalds 已提交
2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701
	if (local_flags & __GFP_WAIT)
		local_irq_disable();
	return 0;
}
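/*
 * Illustrative note (not from the original source): the colour_next cycling
 * above staggers where objects start in successive slabs.  With
 * colour_off == cache_line_size() (say 64 bytes) and cachep->colour == 3,
 * consecutive slabs place their first object at offsets 0, 64 and 128
 * before wrapping back to 0, which spreads hot objects across CPU cache sets.
 */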

#if DEBUG

/*
 * Perform extra freeing checks:
 * - detect bad pointers.
 * - POISON/RED_ZONE checking
 */
static void kfree_debugcheck(const void *objp)
{
	if (!virt_addr_valid(objp)) {
		printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
P
Pekka Enberg 已提交
2702 2703
		       (unsigned long)objp);
		BUG();
L
Linus Torvalds 已提交
2704 2705 2706
	}
}

2707 2708
static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
{
2709
	unsigned long long redzone1, redzone2;
2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724

	redzone1 = *dbg_redzone1(cache, obj);
	redzone2 = *dbg_redzone2(cache, obj);

	/*
	 * Redzone is ok.
	 */
	if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
		return;

	if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
		slab_error(cache, "double free detected");
	else
		slab_error(cache, "memory outside object was overwritten");

2725
	printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
2726 2727 2728
			obj, redzone1, redzone2);
}

2729
static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2730
				   unsigned long caller)
L
Linus Torvalds 已提交
2731 2732
{
	unsigned int objnr;
2733
	struct page *page;
L
Linus Torvalds 已提交
2734

2735 2736
	BUG_ON(virt_to_cache(objp) != cachep);

2737
	objp -= obj_offset(cachep);
L
Linus Torvalds 已提交
2738
	kfree_debugcheck(objp);
2739
	page = virt_to_head_page(objp);
L
Linus Torvalds 已提交
2740 2741

	if (cachep->flags & SLAB_RED_ZONE) {
2742
		verify_redzone_free(cachep, objp);
L
Linus Torvalds 已提交
2743 2744 2745 2746
		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
	}
	if (cachep->flags & SLAB_STORE_USER)
2747
		*dbg_userword(cachep, objp) = (void *)caller;
L
Linus Torvalds 已提交
2748

2749
	objnr = obj_to_index(cachep, page, objp);
L
Linus Torvalds 已提交
2750 2751

	BUG_ON(objnr >= cachep->num);
2752
	BUG_ON(objp != index_to_obj(cachep, page, objnr));
L
Linus Torvalds 已提交
2753

2754
	set_obj_status(page, objnr, OBJECT_FREE);
L
Linus Torvalds 已提交
2755 2756
	if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
2757
		if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
2758
			store_stackinfo(cachep, objp, caller);
P
Pekka Enberg 已提交
2759
			kernel_map_pages(virt_to_page(objp),
2760
					 cachep->size / PAGE_SIZE, 0);
L
Linus Torvalds 已提交
2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775
		} else {
			poison_obj(cachep, objp, POISON_FREE);
		}
#else
		poison_obj(cachep, objp, POISON_FREE);
#endif
	}
	return objp;
}

#else
#define kfree_debugcheck(x) do { } while(0)
#define cache_free_debugcheck(x,objp,z) (objp)
#endif

2776 2777
static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
							bool force_refill)
L
Linus Torvalds 已提交
2778 2779
{
	int batchcount;
2780
	struct kmem_cache_node *n;
L
Linus Torvalds 已提交
2781
	struct array_cache *ac;
P
Pekka Enberg 已提交
2782 2783
	int node;

L
Linus Torvalds 已提交
2784
	check_irq_off();
2785
	node = numa_mem_id();
2786 2787 2788
	if (unlikely(force_refill))
		goto force_grow;
retry:
2789
	ac = cpu_cache_get(cachep);
L
Linus Torvalds 已提交
2790 2791
	batchcount = ac->batchcount;
	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
A
Andrew Morton 已提交
2792 2793 2794 2795
		/*
		 * If there was little recent activity on this cache, then
		 * perform only a partial refill.  Otherwise we could generate
		 * refill bouncing.
L
Linus Torvalds 已提交
2796 2797 2798
		 */
		batchcount = BATCHREFILL_LIMIT;
	}
2799
	n = get_node(cachep, node);
2800

2801 2802
	BUG_ON(ac->avail > 0 || !n);
	spin_lock(&n->list_lock);
L
Linus Torvalds 已提交
2803

2804
	/* See if we can refill from the shared array */
2805 2806
	if (n->shared && transfer_objects(ac, n->shared, batchcount)) {
		n->shared->touched = 1;
2807
		goto alloc_done;
2808
	}
2809

L
Linus Torvalds 已提交
2810 2811
	while (batchcount > 0) {
		struct list_head *entry;
2812
		struct page *page;
L
Linus Torvalds 已提交
2813
		/* Get slab alloc is to come from. */
2814 2815 2816 2817 2818
		entry = n->slabs_partial.next;
		if (entry == &n->slabs_partial) {
			n->free_touched = 1;
			entry = n->slabs_free.next;
			if (entry == &n->slabs_free)
L
Linus Torvalds 已提交
2819 2820 2821
				goto must_grow;
		}

2822
		page = list_entry(entry, struct page, lru);
L
Linus Torvalds 已提交
2823
		check_spinlock_acquired(cachep);
2824 2825 2826 2827 2828 2829

		/*
		 * The slab was either on partial or free list so
		 * there must be at least one object available for
		 * allocation.
		 */
2830
		BUG_ON(page->active >= cachep->num);
2831

2832
		while (page->active < cachep->num && batchcount--) {
L
Linus Torvalds 已提交
2833 2834 2835 2836
			STATS_INC_ALLOCED(cachep);
			STATS_INC_ACTIVE(cachep);
			STATS_SET_HIGH(cachep);

2837
			ac_put_obj(cachep, ac, slab_get_obj(cachep, page,
2838
									node));
L
Linus Torvalds 已提交
2839 2840 2841
		}

		/* move slabp to correct slabp list: */
2842 2843
		list_del(&page->lru);
		if (page->active == cachep->num)
2844
			list_add(&page->lru, &n->slabs_full);
L
Linus Torvalds 已提交
2845
		else
2846
			list_add(&page->lru, &n->slabs_partial);
L
Linus Torvalds 已提交
2847 2848
	}

A
Andrew Morton 已提交
2849
must_grow:
2850
	n->free_objects -= ac->avail;
A
Andrew Morton 已提交
2851
alloc_done:
2852
	spin_unlock(&n->list_lock);
L
Linus Torvalds 已提交
2853 2854 2855

	if (unlikely(!ac->avail)) {
		int x;
2856
force_grow:
2857
		x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
2858

A
Andrew Morton 已提交
2859
		/* cache_grow can reenable interrupts, then ac could change. */
2860
		ac = cpu_cache_get(cachep);
2861
		node = numa_mem_id();
2862 2863 2864

		/* no objects in sight? abort */
		if (!x && (ac->avail == 0 || force_refill))
L
Linus Torvalds 已提交
2865 2866
			return NULL;

A
Andrew Morton 已提交
2867
		if (!ac->avail)		/* objects refilled by interrupt? */
L
Linus Torvalds 已提交
2868 2869 2870
			goto retry;
	}
	ac->touched = 1;
2871 2872

	return ac_get_obj(cachep, ac, flags, force_refill);
L
Linus Torvalds 已提交
2873 2874
}
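/*
 * Illustrative note (not from the original source): a refill moves up to
 * ac->batchcount objects from the per-node lists (or the shared array) into
 * the per-CPU array in one go.  With a hypothetical batchcount of 16, one
 * miss repopulates the array so the next dozen or so allocations are
 * satisfied without taking the kmem_cache_node list_lock at all.
 */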

A
Andrew Morton 已提交
2875 2876
static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
						gfp_t flags)
L
Linus Torvalds 已提交
2877 2878 2879 2880 2881 2882 2883 2884
{
	might_sleep_if(flags & __GFP_WAIT);
#if DEBUG
	kmem_flagcheck(cachep, flags);
#endif
}

#if DEBUG
A
Andrew Morton 已提交
2885
static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
2886
				gfp_t flags, void *objp, unsigned long caller)
L
Linus Torvalds 已提交
2887
{
2888 2889
	struct page *page;

P
Pekka Enberg 已提交
2890
	if (!objp)
L
Linus Torvalds 已提交
2891
		return objp;
P
Pekka Enberg 已提交
2892
	if (cachep->flags & SLAB_POISON) {
L
Linus Torvalds 已提交
2893
#ifdef CONFIG_DEBUG_PAGEALLOC
2894
		if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
P
Pekka Enberg 已提交
2895
			kernel_map_pages(virt_to_page(objp),
2896
					 cachep->size / PAGE_SIZE, 1);
L
Linus Torvalds 已提交
2897 2898 2899 2900 2901 2902 2903 2904
		else
			check_poison_obj(cachep, objp);
#else
		check_poison_obj(cachep, objp);
#endif
		poison_obj(cachep, objp, POISON_INUSE);
	}
	if (cachep->flags & SLAB_STORE_USER)
2905
		*dbg_userword(cachep, objp) = (void *)caller;
L
Linus Torvalds 已提交
2906 2907

	if (cachep->flags & SLAB_RED_ZONE) {
A
Andrew Morton 已提交
2908 2909 2910 2911
		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
			slab_error(cachep, "double free, or memory outside"
						" object was overwritten");
P
Pekka Enberg 已提交
2912
			printk(KERN_ERR
2913
				"%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
A
Andrew Morton 已提交
2914 2915
				objp, *dbg_redzone1(cachep, objp),
				*dbg_redzone2(cachep, objp));
L
Linus Torvalds 已提交
2916 2917 2918 2919
		}
		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
	}
2920 2921 2922

	page = virt_to_head_page(objp);
	set_obj_status(page, obj_to_index(cachep, page, objp), OBJECT_ACTIVE);
2923
	objp += obj_offset(cachep);
2924
	if (cachep->ctor && cachep->flags & SLAB_POISON)
2925
		cachep->ctor(objp);
T
Tetsuo Handa 已提交
2926 2927
	if (ARCH_SLAB_MINALIGN &&
	    ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
2928
		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
H
Hugh Dickins 已提交
2929
		       objp, (int)ARCH_SLAB_MINALIGN);
2930
	}
L
Linus Torvalds 已提交
2931 2932 2933 2934 2935 2936
	return objp;
}
#else
#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
#endif

A
Akinobu Mita 已提交
2937
static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
2938
{
2939
	if (unlikely(cachep == kmem_cache))
A
Akinobu Mita 已提交
2940
		return false;
2941

2942
	return should_failslab(cachep->object_size, flags, cachep->flags);
2943 2944
}

2945
static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
L
Linus Torvalds 已提交
2946
{
P
Pekka Enberg 已提交
2947
	void *objp;
L
Linus Torvalds 已提交
2948
	struct array_cache *ac;
2949
	bool force_refill = false;
L
Linus Torvalds 已提交
2950

2951
	check_irq_off();
2952

2953
	ac = cpu_cache_get(cachep);
L
Linus Torvalds 已提交
2954 2955
	if (likely(ac->avail)) {
		ac->touched = 1;
2956 2957
		objp = ac_get_obj(cachep, ac, flags, false);

2958
		/*
2959 2960
		 * Allow for the possibility all avail objects are not allowed
		 * by the current flags
2961
		 */
2962 2963 2964 2965 2966
		if (objp) {
			STATS_INC_ALLOCHIT(cachep);
			goto out;
		}
		force_refill = true;
L
Linus Torvalds 已提交
2967
	}
2968 2969 2970 2971 2972 2973 2974 2975 2976 2977

	STATS_INC_ALLOCMISS(cachep);
	objp = cache_alloc_refill(cachep, flags, force_refill);
	/*
	 * the 'ac' may be updated by cache_alloc_refill(),
	 * and kmemleak_erase() requires its correct value.
	 */
	ac = cpu_cache_get(cachep);

out:
2978 2979 2980 2981 2982
	/*
	 * To avoid a false negative, if an object that is in one of the
	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
	 * treat the array pointers as a reference to the object.
	 */
2983 2984
	if (objp)
		kmemleak_erase(&ac->entry[ac->avail]);
2985 2986 2987
	return objp;
}

2988
#ifdef CONFIG_NUMA
2989
/*
2990
 * Try allocating on another node if PFA_SPREAD_SLAB or a mempolicy is set.
2991 2992 2993 2994 2995 2996 2997 2998
 *
 * If we are in_interrupt, then process context, including cpusets and
 * mempolicy, may not apply and should not be used for allocation policy.
 */
static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	int nid_alloc, nid_here;

2999
	if (in_interrupt() || (flags & __GFP_THISNODE))
3000
		return NULL;
3001
	nid_alloc = nid_here = numa_mem_id();
3002
	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3003
		nid_alloc = cpuset_slab_spread_node();
3004
	else if (current->mempolicy)
3005
		nid_alloc = mempolicy_slab_node();
3006
	if (nid_alloc != nid_here)
3007
		return ____cache_alloc_node(cachep, flags, nid_alloc);
3008 3009 3010
	return NULL;
}

3011 3012
/*
 * Fallback function if there was no memory available and no objects on a
3013
 * certain node and fall back is permitted. First we scan all the
3014
 * available nodes for available objects. If that fails then we
3015 3016 3017
 * perform an allocation without specifying a node. This allows the page
 * allocator to do its reclaim / fallback magic. We then insert the
 * slab into the proper nodelist and then allocate from it.
3018
 */
3019
static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3020
{
3021 3022
	struct zonelist *zonelist;
	gfp_t local_flags;
3023
	struct zoneref *z;
3024 3025
	struct zone *zone;
	enum zone_type high_zoneidx = gfp_zone(flags);
3026
	void *obj = NULL;
3027
	int nid;
3028
	unsigned int cpuset_mems_cookie;
3029 3030 3031 3032

	if (flags & __GFP_THISNODE)
		return NULL;

C
Christoph Lameter 已提交
3033
	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
3034

3035
retry_cpuset:
3036
	cpuset_mems_cookie = read_mems_allowed_begin();
3037
	zonelist = node_zonelist(mempolicy_slab_node(), flags);
3038

3039 3040 3041 3042 3043
retry:
	/*
	 * Look through allowed nodes for objects available
	 * from existing per node queues.
	 */
3044 3045
	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
		nid = zone_to_nid(zone);
3046

3047
		if (cpuset_zone_allowed_hardwall(zone, flags) &&
3048 3049
			get_node(cache, nid) &&
			get_node(cache, nid)->free_objects) {
3050 3051
				obj = ____cache_alloc_node(cache,
					flags | GFP_THISNODE, nid);
3052 3053 3054
				if (obj)
					break;
		}
3055 3056
	}

3057
	if (!obj) {
3058 3059 3060 3061 3062 3063
		/*
		 * This allocation will be performed within the constraints
		 * of the current cpuset / memory policy requirements.
		 * We may trigger various forms of reclaim on the allowed
		 * set and go into memory reserves if necessary.
		 */
3064 3065
		struct page *page;

3066 3067 3068
		if (local_flags & __GFP_WAIT)
			local_irq_enable();
		kmem_flagcheck(cache, flags);
3069
		page = kmem_getpages(cache, local_flags, numa_mem_id());
3070 3071
		if (local_flags & __GFP_WAIT)
			local_irq_disable();
3072
		if (page) {
3073 3074 3075
			/*
			 * Insert into the appropriate per node queues
			 */
3076 3077
			nid = page_to_nid(page);
			if (cache_grow(cache, flags, nid, page)) {
3078 3079 3080 3081 3082 3083 3084 3085 3086 3087
				obj = ____cache_alloc_node(cache,
					flags | GFP_THISNODE, nid);
				if (!obj)
					/*
					 * Another processor may allocate the
					 * objects in the slab since we are
					 * not holding any locks.
					 */
					goto retry;
			} else {
3088
				/* cache_grow already freed obj */
3089 3090 3091
				obj = NULL;
			}
		}
3092
	}
3093

3094
	if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
3095
		goto retry_cpuset;
3096 3097 3098
	return obj;
}

3099 3100
/*
 * An interface to enable slab creation on nodeid
L
Linus Torvalds 已提交
3101
 */
3102
static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
A
Andrew Morton 已提交
3103
				int nodeid)
3104 3105
{
	struct list_head *entry;
3106
	struct page *page;
3107
	struct kmem_cache_node *n;
P
Pekka Enberg 已提交
3108 3109 3110
	void *obj;
	int x;

3111
	VM_BUG_ON(nodeid > num_online_nodes());
3112
	n = get_node(cachep, nodeid);
3113
	BUG_ON(!n);
P
Pekka Enberg 已提交
3114

A
Andrew Morton 已提交
3115
retry:
3116
	check_irq_off();
3117 3118 3119 3120 3121 3122
	spin_lock(&n->list_lock);
	entry = n->slabs_partial.next;
	if (entry == &n->slabs_partial) {
		n->free_touched = 1;
		entry = n->slabs_free.next;
		if (entry == &n->slabs_free)
P
Pekka Enberg 已提交
3123 3124 3125
			goto must_grow;
	}

3126
	page = list_entry(entry, struct page, lru);
P
Pekka Enberg 已提交
3127 3128 3129 3130 3131 3132
	check_spinlock_acquired_node(cachep, nodeid);

	STATS_INC_NODEALLOCS(cachep);
	STATS_INC_ACTIVE(cachep);
	STATS_SET_HIGH(cachep);

3133
	BUG_ON(page->active == cachep->num);
P
Pekka Enberg 已提交
3134

3135
	obj = slab_get_obj(cachep, page, nodeid);
3136
	n->free_objects--;
P
Pekka Enberg 已提交
3137
	/* move slabp to correct slabp list: */
3138
	list_del(&page->lru);
P
Pekka Enberg 已提交
3139

3140 3141
	if (page->active == cachep->num)
		list_add(&page->lru, &n->slabs_full);
A
Andrew Morton 已提交
3142
	else
3143
		list_add(&page->lru, &n->slabs_partial);
3144

3145
	spin_unlock(&n->list_lock);
P
Pekka Enberg 已提交
3146
	goto done;
3147

A
Andrew Morton 已提交
3148
must_grow:
3149
	spin_unlock(&n->list_lock);
3150
	x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
3151 3152
	if (x)
		goto retry;
L
Linus Torvalds 已提交
3153

3154
	return fallback_alloc(cachep, flags);
3155

A
Andrew Morton 已提交
3156
done:
P
Pekka Enberg 已提交
3157
	return obj;
3158
}
3159 3160

static __always_inline void *
3161
slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3162
		   unsigned long caller)
3163 3164 3165
{
	unsigned long save_flags;
	void *ptr;
3166
	int slab_node = numa_mem_id();
3167

3168
	flags &= gfp_allowed_mask;
3169

3170 3171
	lockdep_trace_alloc(flags);

A
Akinobu Mita 已提交
3172
	if (slab_should_failslab(cachep, flags))
3173 3174
		return NULL;

3175 3176
	cachep = memcg_kmem_get_cache(cachep, flags);

3177 3178 3179
	cache_alloc_debugcheck_before(cachep, flags);
	local_irq_save(save_flags);

A
Andrew Morton 已提交
3180
	if (nodeid == NUMA_NO_NODE)
3181
		nodeid = slab_node;
3182

3183
	if (unlikely(!get_node(cachep, nodeid))) {
3184 3185 3186 3187 3188
		/* Node not bootstrapped yet */
		ptr = fallback_alloc(cachep, flags);
		goto out;
	}

3189
	if (nodeid == slab_node) {
3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204
		/*
		 * Use the locally cached objects if possible.
		 * However ____cache_alloc does not allow fallback
		 * to other nodes. It may fail while we still have
		 * objects on other nodes available.
		 */
		ptr = ____cache_alloc(cachep, flags);
		if (ptr)
			goto out;
	}
	/* ___cache_alloc_node can fall back to other nodes */
	ptr = ____cache_alloc_node(cachep, flags, nodeid);
  out:
	local_irq_restore(save_flags);
	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3205
	kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags,
3206
				 flags);
3207

3208
	if (likely(ptr)) {
3209
		kmemcheck_slab_alloc(cachep, flags, ptr, cachep->object_size);
3210 3211 3212
		if (unlikely(flags & __GFP_ZERO))
			memset(ptr, 0, cachep->object_size);
	}
3213

3214 3215 3216 3217 3218 3219 3220 3221
	return ptr;
}

static __always_inline void *
__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
{
	void *objp;

3222
	if (current->mempolicy || cpuset_do_slab_mem_spread()) {
3223 3224 3225 3226 3227 3228 3229 3230 3231 3232
		objp = alternate_node_alloc(cache, flags);
		if (objp)
			goto out;
	}
	objp = ____cache_alloc(cache, flags);

	/*
	 * We may just have run out of memory on the local node.
	 * ____cache_alloc_node() knows how to locate memory on other nodes
	 */
3233 3234
	if (!objp)
		objp = ____cache_alloc_node(cache, flags, numa_mem_id());
3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249

  out:
	return objp;
}
#else

static __always_inline void *
__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	return ____cache_alloc(cachep, flags);
}

#endif /* CONFIG_NUMA */

static __always_inline void *
3250
slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
3251 3252 3253 3254
{
	unsigned long save_flags;
	void *objp;

3255
	flags &= gfp_allowed_mask;
3256

3257 3258
	lockdep_trace_alloc(flags);

A
Akinobu Mita 已提交
3259
	if (slab_should_failslab(cachep, flags))
3260 3261
		return NULL;

3262 3263
	cachep = memcg_kmem_get_cache(cachep, flags);

3264 3265 3266 3267 3268
	cache_alloc_debugcheck_before(cachep, flags);
	local_irq_save(save_flags);
	objp = __do_cache_alloc(cachep, flags);
	local_irq_restore(save_flags);
	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3269
	kmemleak_alloc_recursive(objp, cachep->object_size, 1, cachep->flags,
3270
				 flags);
3271 3272
	prefetchw(objp);

3273
	if (likely(objp)) {
3274
		kmemcheck_slab_alloc(cachep, flags, objp, cachep->object_size);
3275 3276 3277
		if (unlikely(flags & __GFP_ZERO))
			memset(objp, 0, cachep->object_size);
	}
3278

3279 3280
	return objp;
}
3281 3282

/*
3283
 * Caller needs to acquire correct kmem_cache_node's list_lock
3284
 * @list: List of detached free slabs should be freed by caller
3285
 */
3286 3287
static void free_block(struct kmem_cache *cachep, void **objpp,
			int nr_objects, int node, struct list_head *list)
L
Linus Torvalds 已提交
3288 3289
{
	int i;
3290
	struct kmem_cache_node *n = get_node(cachep, node);
L
Linus Torvalds 已提交
3291 3292

	for (i = 0; i < nr_objects; i++) {
3293
		void *objp;
3294
		struct page *page;
L
Linus Torvalds 已提交
3295

3296 3297 3298
		clear_obj_pfmemalloc(&objpp[i]);
		objp = objpp[i];

3299 3300
		page = virt_to_head_page(objp);
		list_del(&page->lru);
3301
		check_spinlock_acquired_node(cachep, node);
3302
		slab_put_obj(cachep, page, objp, node);
L
Linus Torvalds 已提交
3303
		STATS_DEC_ACTIVE(cachep);
3304
		n->free_objects++;
L
Linus Torvalds 已提交
3305 3306

		/* fixup slab chains */
3307
		if (page->active == 0) {
3308 3309
			if (n->free_objects > n->free_limit) {
				n->free_objects -= cachep->num;
3310
				list_add_tail(&page->lru, list);
L
Linus Torvalds 已提交
3311
			} else {
3312
				list_add(&page->lru, &n->slabs_free);
L
Linus Torvalds 已提交
3313 3314 3315 3316 3317 3318
			}
		} else {
			/* Unconditionally move a slab to the end of the
			 * partial list on free - maximum time for the
			 * other objects to be freed, too.
			 */
3319
			list_add_tail(&page->lru, &n->slabs_partial);
L
Linus Torvalds 已提交
3320 3321 3322 3323
		}
	}
}

/*
 * Flush the per-cpu array: move up to ac->batchcount objects into the node's
 * shared array when it has space, otherwise give them back to the slab lists
 * via free_block(), then slide the remaining entries to the front of @ac.
 */
static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
{
	int batchcount;
	struct kmem_cache_node *n;
	int node = numa_mem_id();
	LIST_HEAD(list);

	batchcount = ac->batchcount;
#if DEBUG
	BUG_ON(!batchcount || batchcount > ac->avail);
#endif
	check_irq_off();
	n = get_node(cachep, node);
	spin_lock(&n->list_lock);
	if (n->shared) {
		struct array_cache *shared_array = n->shared;
		int max = shared_array->limit - shared_array->avail;

		if (max) {
			if (batchcount > max)
				batchcount = max;
			memcpy(&(shared_array->entry[shared_array->avail]),
			       ac->entry, sizeof(void *) * batchcount);
			shared_array->avail += batchcount;
			goto free_done;
		}
	}

	free_block(cachep, ac->entry, batchcount, node, &list);
free_done:
#if STATS
	{
		int i = 0;
		struct list_head *p;

		p = n->slabs_free.next;
		while (p != &(n->slabs_free)) {
			struct page *page;

			page = list_entry(p, struct page, lru);
			BUG_ON(page->active);

			i++;
			p = p->next;
		}
		STATS_SET_FREEABLE(cachep, i);
	}
#endif
	spin_unlock(&n->list_lock);
	slabs_destroy(cachep, &list);
	ac->avail -= batchcount;
	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
}

/*
 * Release an obj back to its cache. If the obj has a constructed state, it
 * must be in this state _before_ it is released.  Called with local
 * interrupts disabled.
 */
static inline void __cache_free(struct kmem_cache *cachep, void *objp,
				unsigned long caller)
{
	struct array_cache *ac = cpu_cache_get(cachep);

	check_irq_off();
	kmemleak_free_recursive(objp, cachep->flags);
	objp = cache_free_debugcheck(cachep, objp, caller);

	kmemcheck_slab_free(cachep, objp, cachep->object_size);

	/*
	 * Skip calling cache_free_alien() when the platform is not NUMA.
	 * This avoids the cache misses that would occur while accessing the
	 * slab page (a per-page memory reference) to get the nodeid.  Instead,
	 * use a global variable to skip the call; it is most likely to be
	 * already present in the CPU cache.
	 */
	if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
		return;

	if (ac->avail < ac->limit) {
		STATS_INC_FREEHIT(cachep);
	} else {
		STATS_INC_FREEMISS(cachep);
		cache_flusharray(cachep, ac);
	}

	ac_put_obj(cachep, ac, objp);
}

/**
 * kmem_cache_alloc - Allocate an object
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 *
 * Allocate an object from this cache.  The flags are only relevant
 * if the cache has no available objects.
 */
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	void *ret = slab_alloc(cachep, flags, _RET_IP_);

	trace_kmem_cache_alloc(_RET_IP_, ret,
			       cachep->object_size, cachep->size, flags);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc);
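
/*
 * Illustrative usage sketch (not part of this file): how a caller might
 * allocate from a dedicated cache.  "struct foo" and "foo_cache" are
 * hypothetical names used only for the example.
 */
#if 0	/* example only, never compiled */
struct foo { int id; };
static struct kmem_cache *foo_cache;	/* set up elsewhere with kmem_cache_create() */

static struct foo *foo_alloc(void)
{
	/* GFP_KERNEL may sleep; atomic contexts would pass GFP_ATOMIC instead */
	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);

	if (!f)
		return NULL;	/* allocation failed, no new slab could be grown */
	f->id = 0;
	return f;
}
#endif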

#ifdef CONFIG_TRACING
void *
kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
	void *ret;

	ret = slab_alloc(cachep, flags, _RET_IP_);

	trace_kmalloc(_RET_IP_, ret,
		      size, cachep->size, flags);
	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_trace);
#endif

#ifdef CONFIG_NUMA
/**
 * kmem_cache_alloc_node - Allocate an object on the specified node
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 * @nodeid: node number of the target node.
 *
 * Identical to kmem_cache_alloc but it will allocate memory on the given
 * node, which can improve the performance for cpu bound structures.
 *
 * Fallback to other node is possible if __GFP_THISNODE is not set.
 */
void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);

	trace_kmem_cache_alloc_node(_RET_IP_, ret,
				    cachep->object_size, cachep->size,
				    flags, nodeid);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
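
/*
 * Illustrative sketch (not part of this file): allocating a per-CPU control
 * structure on the node that owns the CPU, so later accesses stay local.
 * "foo_cache" is hypothetical; cpu_to_node() is the usual way to map a CPU
 * to its memory node.
 */
#if 0	/* example only, never compiled */
static void *foo_alloc_for_cpu(struct kmem_cache *foo_cache, int cpu)
{
	int nid = cpu_to_node(cpu);

	/*
	 * Without __GFP_THISNODE the allocation may still fall back to
	 * another node when @nid has no free objects or pages.
	 */
	return kmem_cache_alloc_node(foo_cache, GFP_KERNEL, nid);
}
#endif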

#ifdef CONFIG_TRACING
void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
				  gfp_t flags,
				  int nodeid,
				  size_t size)
{
	void *ret;

	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);

	trace_kmalloc_node(_RET_IP_, ret,
			   size, cachep->size,
			   flags, nodeid);
	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
#endif

static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
{
	struct kmem_cache *cachep;

	cachep = kmalloc_slab(size, flags);
	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
		return cachep;
	return kmem_cache_alloc_node_trace(cachep, flags, node, size);
}

void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __do_kmalloc_node(size, flags, node, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc_node);

void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
		int node, unsigned long caller)
{
	return __do_kmalloc_node(size, flags, node, caller);
}
EXPORT_SYMBOL(__kmalloc_node_track_caller);
#endif /* CONFIG_NUMA */

/**
 * __do_kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @caller: function caller for debug tracking of the caller
 */
static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
					  unsigned long caller)
{
	struct kmem_cache *cachep;
	void *ret;

	cachep = kmalloc_slab(size, flags);
	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
		return cachep;
	ret = slab_alloc(cachep, flags, caller);

	trace_kmalloc(caller, ret,
		      size, cachep->size, flags);

	return ret;
}

void *__kmalloc(size_t size, gfp_t flags)
{
	return __do_kmalloc(size, flags, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc);

void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
{
	return __do_kmalloc(size, flags, caller);
}
EXPORT_SYMBOL(__kmalloc_track_caller);

/**
 * kmem_cache_free - Deallocate an object
 * @cachep: The cache the allocation was from.
 * @objp: The previously allocated object.
 *
 * Free an object which was previously allocated from this
 * cache.
 */
void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	unsigned long flags;
	cachep = cache_from_obj(cachep, objp);
	if (!cachep)
		return;

	local_irq_save(flags);
	debug_check_no_locks_freed(objp, cachep->object_size);
	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
		debug_check_no_obj_freed(objp, cachep->object_size);
	__cache_free(cachep, objp, _RET_IP_);
	local_irq_restore(flags);

	trace_kmem_cache_free(_RET_IP_, objp);
}
EXPORT_SYMBOL(kmem_cache_free);
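
/*
 * Illustrative sketch (not part of this file): the usual object life cycle
 * around kmem_cache_alloc()/kmem_cache_free().  All names are hypothetical;
 * the cache may be destroyed only after every object has been freed back
 * and no further allocations can race with the destroy.
 */
#if 0	/* example only, never compiled */
struct foo { struct list_head list; };
static struct kmem_cache *foo_cache;

static int __init foo_init(void)
{
	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
				      0, SLAB_HWCACHE_ALIGN, NULL);
	if (!foo_cache)
		return -ENOMEM;
	return 0;
}

static void foo_release(struct foo *f)
{
	/* must be returned to the cache it was allocated from */
	kmem_cache_free(foo_cache, f);
}

static void __exit foo_exit(void)
{
	/* all objects must already have been freed via foo_release() */
	kmem_cache_destroy(foo_cache);
}
#endif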

/**
 * kfree - free previously allocated memory
 * @objp: pointer returned by kmalloc.
 *
 * If @objp is NULL, no operation is performed.
 *
 * Don't free memory not originally allocated by kmalloc()
 * or you will run into trouble.
 */
void kfree(const void *objp)
{
	struct kmem_cache *c;
	unsigned long flags;

	trace_kfree(_RET_IP_, objp);

	if (unlikely(ZERO_OR_NULL_PTR(objp)))
		return;
	local_irq_save(flags);
	kfree_debugcheck(objp);
	c = virt_to_cache(objp);
	debug_check_no_locks_freed(objp, c->object_size);

	debug_check_no_obj_freed(objp, c->object_size);
	__cache_free(c, (void *)objp, _RET_IP_);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(kfree);
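
/*
 * Illustrative sketch (not part of this file): the kmalloc()/kfree() pair
 * this function serves.  The helper name and buffer size are arbitrary;
 * note that kfree(NULL) and kfree(ZERO_SIZE_PTR) are allowed no-ops.
 */
#if 0	/* example only, never compiled */
static int foo_copy(const void *src, size_t len)
{
	void *buf = kmalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	memcpy(buf, src, len);
	/* ... use buf ... */
	kfree(buf);		/* must be a pointer obtained from kmalloc() */
	return 0;
}
#endif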

/*
 * This initializes kmem_cache_node or resizes various caches for all nodes.
 */
static int alloc_kmem_cache_node(struct kmem_cache *cachep, gfp_t gfp)
{
	int node;
	struct kmem_cache_node *n;
	struct array_cache *new_shared;
	struct alien_cache **new_alien = NULL;

	for_each_online_node(node) {

		if (use_alien_caches) {
			new_alien = alloc_alien_cache(node, cachep->limit, gfp);
			if (!new_alien)
				goto fail;
		}

		new_shared = NULL;
		if (cachep->shared) {
			new_shared = alloc_arraycache(node,
				cachep->shared*cachep->batchcount,
					0xbaadf00d, gfp);
			if (!new_shared) {
				free_alien_cache(new_alien);
				goto fail;
			}
		}

		n = get_node(cachep, node);
		if (n) {
			struct array_cache *shared = n->shared;
			LIST_HEAD(list);

			spin_lock_irq(&n->list_lock);

			if (shared)
				free_block(cachep, shared->entry,
						shared->avail, node, &list);

			n->shared = new_shared;
			if (!n->alien) {
				n->alien = new_alien;
				new_alien = NULL;
			}
			n->free_limit = (1 + nr_cpus_node(node)) *
					cachep->batchcount + cachep->num;
			spin_unlock_irq(&n->list_lock);
			slabs_destroy(cachep, &list);
			kfree(shared);
			free_alien_cache(new_alien);
			continue;
		}
		n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
		if (!n) {
			free_alien_cache(new_alien);
			kfree(new_shared);
			goto fail;
		}

		kmem_cache_node_init(n);
		n->next_reap = jiffies + REAPTIMEOUT_NODE +
				((unsigned long)cachep) % REAPTIMEOUT_NODE;
		n->shared = new_shared;
		n->alien = new_alien;
		n->free_limit = (1 + nr_cpus_node(node)) *
					cachep->batchcount + cachep->num;
		cachep->node[node] = n;
	}
	return 0;

fail:
	if (!cachep->list.next) {
		/* Cache is not active yet. Roll back what we did */
		node--;
		while (node >= 0) {
			n = get_node(cachep, node);
			if (n) {
				kfree(n->shared);
				free_alien_cache(n->alien);
				kfree(n);
				cachep->node[node] = NULL;
			}
			node--;
		}
	}
	return -ENOMEM;
}

struct ccupdate_struct {
	struct kmem_cache *cachep;
	struct array_cache *new[0];
};

/*
 * Swap in the new per-cpu array_cache on this CPU and remember the old one
 * so the caller can drain and free it.
 */
static void do_ccupdate_local(void *info)
{
	struct ccupdate_struct *new = info;
	struct array_cache *old;

	check_irq_off();
	old = cpu_cache_get(new->cachep);

	new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
	new->new[smp_processor_id()] = old;
}

/* Always called with the slab_mutex held */
static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
				int batchcount, int shared, gfp_t gfp)
{
	struct ccupdate_struct *new;
	int i;

	new = kzalloc(sizeof(*new) + nr_cpu_ids * sizeof(struct array_cache *),
		      gfp);
	if (!new)
		return -ENOMEM;

	for_each_online_cpu(i) {
		new->new[i] = alloc_arraycache(cpu_to_mem(i), limit,
						batchcount, gfp);
		if (!new->new[i]) {
			for (i--; i >= 0; i--)
				kfree(new->new[i]);
			kfree(new);
			return -ENOMEM;
		}
	}
	new->cachep = cachep;

	on_each_cpu(do_ccupdate_local, (void *)new, 1);

	check_irq_on();
	cachep->batchcount = batchcount;
	cachep->limit = limit;
	cachep->shared = shared;

	for_each_online_cpu(i) {
		LIST_HEAD(list);
		struct array_cache *ccold = new->new[i];
		int node;
		struct kmem_cache_node *n;

		if (!ccold)
			continue;

		node = cpu_to_mem(i);
		n = get_node(cachep, node);
		spin_lock_irq(&n->list_lock);
		free_block(cachep, ccold->entry, ccold->avail, node, &list);
		spin_unlock_irq(&n->list_lock);
		slabs_destroy(cachep, &list);
		kfree(ccold);
	}
	kfree(new);
	return alloc_kmem_cache_node(cachep, gfp);
}

static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
				int batchcount, int shared, gfp_t gfp)
{
	int ret;
	struct kmem_cache *c = NULL;
	int i = 0;

	ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);

	if (slab_state < FULL)
		return ret;

	if ((ret < 0) || !is_root_cache(cachep))
		return ret;

	VM_BUG_ON(!mutex_is_locked(&slab_mutex));
	for_each_memcg_cache_index(i) {
		c = cache_from_memcg_idx(cachep, i);
		if (c)
			/* return value determined by the parent cache only */
			__do_tune_cpucache(c, limit, batchcount, shared, gfp);
	}

	return ret;
}

/* Always called with the slab_mutex held */
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
{
	int err;
	int limit = 0;
	int shared = 0;
	int batchcount = 0;

	if (!is_root_cache(cachep)) {
		struct kmem_cache *root = memcg_root_cache(cachep);
		limit = root->limit;
		shared = root->shared;
		batchcount = root->batchcount;
	}

	if (limit && shared && batchcount)
		goto skip_setup;
	/*
	 * The head array serves three purposes:
	 * - create a LIFO ordering, i.e. return objects that are cache-warm
	 * - reduce the number of spinlock operations
	 * - reduce the number of linked list operations on the slab and
	 *   bufctl chains: array operations are cheaper.
	 * The numbers are guessed, we should auto-tune as described by
	 * Bonwick.
	 */
	if (cachep->size > 131072)
		limit = 1;
	else if (cachep->size > PAGE_SIZE)
		limit = 8;
	else if (cachep->size > 1024)
		limit = 24;
	else if (cachep->size > 256)
		limit = 54;
	else
		limit = 120;

	/*
	 * CPU bound tasks (e.g. network routing) can exhibit cpu bound
	 * allocation behaviour: Most allocs on one cpu, most free operations
	 * on another cpu. For these cases, an efficient object passing between
	 * cpus is necessary. This is provided by a shared array. The array
	 * replaces Bonwick's magazine layer.
	 * On uniprocessor, it's functionally equivalent (but less efficient)
	 * to a larger limit. Thus disabled by default.
	 */
	shared = 0;
	if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
		shared = 8;

#if DEBUG
	/*
	 * With debugging enabled, large batchcounts lead to excessively long
	 * periods with local interrupts disabled. Limit the batchcount.
	 */
	if (limit > 32)
		limit = 32;
#endif
	batchcount = (limit + 1) / 2;
skip_setup:
	err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
	if (err)
		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
		       cachep->name, -err);
	return err;
}

/*
 * Drain an array if it contains any elements, taking the node lock only if
 * necessary. Note that the node list_lock also protects the array_cache
 * if drain_array() is used on the shared array.
 */
static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
			 struct array_cache *ac, int force, int node)
{
	LIST_HEAD(list);
	int tofree;

	if (!ac || !ac->avail)
		return;
	if (ac->touched && !force) {
		ac->touched = 0;
	} else {
		spin_lock_irq(&n->list_lock);
		if (ac->avail) {
			tofree = force ? ac->avail : (ac->limit + 4) / 5;
			if (tofree > ac->avail)
				tofree = (ac->avail + 1) / 2;
			free_block(cachep, ac->entry, tofree, node, &list);
			ac->avail -= tofree;
			memmove(ac->entry, &(ac->entry[tofree]),
				sizeof(void *) * ac->avail);
		}
		spin_unlock_irq(&n->list_lock);
		slabs_destroy(cachep, &list);
	}
}

/**
 * cache_reap - Reclaim memory from caches.
 * @w: work descriptor
 *
 * Called from workqueue/eventd every few seconds.
 * Purpose:
 * - clear the per-cpu caches for this CPU.
 * - return freeable pages to the main free memory pool.
 *
 * If we cannot acquire the cache chain mutex then just give up - we'll try
 * again on the next iteration.
 */
static void cache_reap(struct work_struct *w)
{
	struct kmem_cache *searchp;
	struct kmem_cache_node *n;
	int node = numa_mem_id();
	struct delayed_work *work = to_delayed_work(w);

	if (!mutex_trylock(&slab_mutex))
		/* Give up. Setup the next iteration. */
		goto out;

	list_for_each_entry(searchp, &slab_caches, list) {
		check_irq_on();

		/*
		 * We only take the node lock if absolutely necessary and we
		 * have established with reasonable certainty that
		 * we can do some work if the lock was obtained.
		 */
		n = get_node(searchp, node);

		reap_alien(searchp, n);

		drain_array(searchp, n, cpu_cache_get(searchp), 0, node);

		/*
		 * These are racy checks but it does not matter
		 * if we skip one check or scan twice.
		 */
		if (time_after(n->next_reap, jiffies))
			goto next;

		n->next_reap = jiffies + REAPTIMEOUT_NODE;

		drain_array(searchp, n, n->shared, 0, node);

		if (n->free_touched)
			n->free_touched = 0;
		else {
			int freed;

			freed = drain_freelist(searchp, n, (n->free_limit +
				5 * searchp->num - 1) / (5 * searchp->num));
			STATS_ADD_REAPED(searchp, freed);
		}
next:
		cond_resched();
	}
	check_irq_on();
	mutex_unlock(&slab_mutex);
	next_reap_node();
out:
	/* Set up the next iteration */
	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
}

#ifdef CONFIG_SLABINFO
void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
{
	struct page *page;
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs = 0;
	unsigned long num_slabs, free_objects = 0, shared_avail = 0;
	const char *name;
	char *error = NULL;
	int node;
	struct kmem_cache_node *n;

	active_objs = 0;
	num_slabs = 0;
	for_each_kmem_cache_node(cachep, node, n) {

		check_irq_on();
		spin_lock_irq(&n->list_lock);

		list_for_each_entry(page, &n->slabs_full, lru) {
			if (page->active != cachep->num && !error)
				error = "slabs_full accounting error";
			active_objs += cachep->num;
			active_slabs++;
		}
		list_for_each_entry(page, &n->slabs_partial, lru) {
			if (page->active == cachep->num && !error)
				error = "slabs_partial accounting error";
			if (!page->active && !error)
				error = "slabs_partial accounting error";
			active_objs += page->active;
			active_slabs++;
		}
		list_for_each_entry(page, &n->slabs_free, lru) {
			if (page->active && !error)
				error = "slabs_free accounting error";
			num_slabs++;
		}
		free_objects += n->free_objects;
		if (n->shared)
			shared_avail += n->shared->avail;

		spin_unlock_irq(&n->list_lock);
	}
	num_slabs += active_slabs;
	num_objs = num_slabs * cachep->num;
	if (num_objs - active_objs != free_objects && !error)
		error = "free_objects accounting error";

	name = cachep->name;
	if (error)
		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);

	sinfo->active_objs = active_objs;
	sinfo->num_objs = num_objs;
	sinfo->active_slabs = active_slabs;
	sinfo->num_slabs = num_slabs;
	sinfo->shared_avail = shared_avail;
	sinfo->limit = cachep->limit;
	sinfo->batchcount = cachep->batchcount;
	sinfo->shared = cachep->shared;
	sinfo->objects_per_slab = cachep->num;
	sinfo->cache_order = cachep->gfporder;
}

void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
{
#if STATS
	{			/* node stats */
		unsigned long high = cachep->high_mark;
		unsigned long allocs = cachep->num_allocations;
		unsigned long grown = cachep->grown;
		unsigned long reaped = cachep->reaped;
		unsigned long errors = cachep->errors;
		unsigned long max_freeable = cachep->max_freeable;
		unsigned long node_allocs = cachep->node_allocs;
		unsigned long node_frees = cachep->node_frees;
		unsigned long overflows = cachep->node_overflow;

		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
			   "%4lu %4lu %4lu %4lu %4lu",
			   allocs, high, grown,
			   reaped, errors, max_freeable, node_allocs,
			   node_frees, overflows);
	}
	/* cpu stats */
	{
		unsigned long allochit = atomic_read(&cachep->allochit);
		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
		unsigned long freehit = atomic_read(&cachep->freehit);
		unsigned long freemiss = atomic_read(&cachep->freemiss);

		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
			   allochit, allocmiss, freehit, freemiss);
	}
#endif
}

#define MAX_SLABINFO_WRITE 128
/**
 * slabinfo_write - Tuning for the slab allocator
 * @file: unused
 * @buffer: user buffer
 * @count: data length
 * @ppos: unused
 */
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos)
{
	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
	int limit, batchcount, shared, res;
	struct kmem_cache *cachep;

	if (count > MAX_SLABINFO_WRITE)
		return -EINVAL;
	if (copy_from_user(&kbuf, buffer, count))
		return -EFAULT;
	kbuf[MAX_SLABINFO_WRITE] = '\0';

	tmp = strchr(kbuf, ' ');
	if (!tmp)
		return -EINVAL;
	*tmp = '\0';
	tmp++;
	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
		return -EINVAL;

	/* Find the cache in the chain of caches. */
	mutex_lock(&slab_mutex);
	res = -EINVAL;
	list_for_each_entry(cachep, &slab_caches, list) {
		if (!strcmp(cachep->name, kbuf)) {
			if (limit < 1 || batchcount < 1 ||
					batchcount > limit || shared < 0) {
				res = 0;
			} else {
				res = do_tune_cpucache(cachep, limit,
						       batchcount, shared,
						       GFP_KERNEL);
			}
			break;
		}
	}
	mutex_unlock(&slab_mutex);
	if (res >= 0)
		res = count;
	return res;
}
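
/*
 * Illustrative note (not part of the code): the buffer parsed above comes
 * from a write to /proc/slabinfo, one cache per write, in the form
 * "<cache-name> <limit> <batchcount> <shared>", e.g. from a shell
 * (cache name chosen arbitrarily):
 *
 *	echo "dentry 128 64 8" > /proc/slabinfo
 *
 * Out-of-range values (limit < 1, batchcount < 1, batchcount > limit or
 * shared < 0) are silently ignored, matching the checks above.
 */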

#ifdef CONFIG_DEBUG_SLAB_LEAK

static void *leaks_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);
	return seq_list_start(&slab_caches, *pos);
}

/*
 * Record caller address @v in the table at @n: n[0] is the table capacity,
 * n[1] the number of (address, count) pairs that follow, kept sorted by
 * address.  Returns 0 once the table is full so the caller can grow it.
 */
static inline int add_caller(unsigned long *n, unsigned long v)
{
	unsigned long *p;
	int l;
	if (!v)
		return 1;
	l = n[1];
	p = n + 2;
	while (l) {
		int i = l/2;
		unsigned long *q = p + 2 * i;
		if (*q == v) {
			q[1]++;
			return 1;
		}
		if (*q > v) {
			l = i;
		} else {
			p = q + 2;
			l -= i + 1;
		}
	}
	if (++n[1] == n[0])
		return 0;
	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
	p[0] = v;
	p[1] = 1;
	return 1;
}

/* Record the allocation caller of every active object in @page. */
static void handle_slab(unsigned long *n, struct kmem_cache *c,
						struct page *page)
{
	void *p;
	int i;

	if (n[0] == n[1])
		return;
	for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
		if (get_obj_status(page, i) != OBJECT_ACTIVE)
			continue;

		if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
			return;
	}
}

static void show_symbol(struct seq_file *m, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	unsigned long offset, size;
	char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];

	if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
		if (modname[0])
			seq_printf(m, " [%s]", modname);
		return;
	}
#endif
	seq_printf(m, "%p", (void *)address);
}

static int leaks_show(struct seq_file *m, void *p)
{
	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
	struct page *page;
	struct kmem_cache_node *n;
	const char *name;
	unsigned long *x = m->private;
	int node;
	int i;

	if (!(cachep->flags & SLAB_STORE_USER))
		return 0;
	if (!(cachep->flags & SLAB_RED_ZONE))
		return 0;

	/* OK, we can do it */

	x[1] = 0;

	for_each_kmem_cache_node(cachep, node, n) {

		check_irq_on();
		spin_lock_irq(&n->list_lock);

		list_for_each_entry(page, &n->slabs_full, lru)
			handle_slab(x, cachep, page);
		list_for_each_entry(page, &n->slabs_partial, lru)
			handle_slab(x, cachep, page);
		spin_unlock_irq(&n->list_lock);
	}
	name = cachep->name;
	if (x[0] == x[1]) {
		/* Increase the buffer size */
		mutex_unlock(&slab_mutex);
		m->private = kzalloc(x[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
		if (!m->private) {
			/* Too bad, we are really out */
			m->private = x;
			mutex_lock(&slab_mutex);
			return -ENOMEM;
		}
		*(unsigned long *)m->private = x[0] * 2;
		kfree(x);
		mutex_lock(&slab_mutex);
		/* Now make sure this entry will be retried */
		m->count = m->size;
		return 0;
	}
	for (i = 0; i < x[1]; i++) {
		seq_printf(m, "%s: %lu ", name, x[2*i+3]);
		show_symbol(m, x[2*i+2]);
		seq_putc(m, '\n');
	}

	return 0;
}

static const struct seq_operations slabstats_op = {
	.start = leaks_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = leaks_show,
};

static int slabstats_open(struct inode *inode, struct file *file)
{
	unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL);
	int ret = -ENOMEM;
	if (n) {
		ret = seq_open(file, &slabstats_op);
		if (!ret) {
			struct seq_file *m = file->private_data;
			*n = PAGE_SIZE / (2 * sizeof(unsigned long));
			m->private = n;
			n = NULL;
		}
		kfree(n);
	}
	return ret;
}

static const struct file_operations proc_slabstats_operations = {
	.open		= slabstats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif

static int __init slab_proc_init(void)
{
#ifdef CONFIG_DEBUG_SLAB_LEAK
	proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
#endif
	return 0;
}
module_init(slab_proc_init);
#endif

/**
 * ksize - get the actual amount of memory allocated for a given object
 * @objp: Pointer to the object
 *
 * kmalloc may internally round up allocations and return more memory
 * than requested. ksize() can be used to determine the actual amount of
 * memory allocated. The caller may use this additional memory, even though
 * a smaller amount of memory was initially specified with the kmalloc call.
 * The caller must guarantee that objp points to a valid object previously
 * allocated with either kmalloc() or kmem_cache_alloc(). The object
 * must not be freed during the duration of the call.
 */
size_t ksize(const void *objp)
{
	BUG_ON(!objp);
	if (unlikely(objp == ZERO_SIZE_PTR))
		return 0;

	return virt_to_cache(objp)->object_size;
}
EXPORT_SYMBOL(ksize);
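
/*
 * Illustrative sketch (not part of this file): using ksize() to discover how
 * much usable memory a kmalloc() allocation really received, since the
 * request is rounded up to the owning cache's object size.  The helper name
 * is hypothetical.
 */
#if 0	/* example only, never compiled */
static size_t foo_usable(size_t requested)
{
	void *p = kmalloc(requested, GFP_KERNEL);
	size_t usable = 0;

	if (p) {
		usable = ksize(p);	/* >= requested for a successful kmalloc */
		kfree(p);
	}
	return usable;
}
#endif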