/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 * 	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with enabled local interrupts -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *  	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'slab_mutex'.
 *	The sem is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
 */
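/*
 * Illustrative usage sketch (not part of the allocator itself; the names
 * "foo" and "foo_cache" below are made up): a typical client creates one
 * cache per object type and then allocates and frees objects from it.
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */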

#include	<linux/slab.h>
#include	<linux/mm.h>
#include	<linux/poison.h>
#include	<linux/swap.h>
#include	<linux/cache.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
#include	<linux/cpuset.h>
#include	<linux/proc_fs.h>
#include	<linux/seq_file.h>
#include	<linux/notifier.h>
#include	<linux/kallsyms.h>
#include	<linux/cpu.h>
#include	<linux/sysctl.h>
#include	<linux/module.h>
#include	<linux/rcupdate.h>
#include	<linux/string.h>
#include	<linux/uaccess.h>
#include	<linux/nodemask.h>
#include	<linux/kmemleak.h>
#include	<linux/mempolicy.h>
#include	<linux/mutex.h>
#include	<linux/fault-inject.h>
#include	<linux/rtmutex.h>
#include	<linux/reciprocal_div.h>
#include	<linux/debugobjects.h>
#include	<linux/kmemcheck.h>
#include	<linux/memory.h>
#include	<linux/prefetch.h>

#include	<net/sock.h>

#include	<asm/cacheflush.h>
#include	<asm/tlbflush.h>
#include	<asm/page.h>

#include <trace/events/kmem.h>

#include	"internal.h"

#include	"slab.h"

/*
 * DEBUG	- 1 for kmem_cache_create() to honour SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)
#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)

#if FREELIST_BYTE_INDEX
typedef unsigned char freelist_idx_t;
#else
typedef unsigned short freelist_idx_t;
#endif

#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)

/*
 * true if a page was allocated from pfmemalloc reserves for network-based
 * swap
 */
static bool pfmemalloc_active __read_mostly;

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	spinlock_t lock;
	void *entry[];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 *
			 * Entries should not be directly dereferenced as
			 * entries belonging to slabs marked pfmemalloc will
			 * have the lower bits set SLAB_OBJ_PFMEMALLOC
			 */
};

#define SLAB_OBJ_PFMEMALLOC	1
static inline bool is_obj_pfmemalloc(void *objp)
{
	return (unsigned long)objp & SLAB_OBJ_PFMEMALLOC;
}

static inline void set_obj_pfmemalloc(void **objp)
{
	*objp = (void *)((unsigned long)*objp | SLAB_OBJ_PFMEMALLOC);
	return;
}

static inline void clear_obj_pfmemalloc(void **objp)
{
	*objp = (void *)((unsigned long)*objp & ~SLAB_OBJ_PFMEMALLOC);
}
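
/*
 * Rough sketch of the tagging scheme above (pointer values are made-up
 * examples): objects handed out by the allocator are at least word
 * aligned, so bit 0 of an object pointer is normally clear and can carry
 * the "allocated from a pfmemalloc slab" flag.
 *
 *	void *obj;			e.g. 0xffff880012345680
 *	set_obj_pfmemalloc(&obj);	 ->  0xffff880012345681
 *	is_obj_pfmemalloc(obj);		 ->  true
 *	clear_obj_pfmemalloc(&obj);	 ->  0xffff880012345680
 */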

/*
 * bootstrap: The caches do not work without cpuarrays anymore, but the
 * cpuarrays are allocated from the generic caches...
 */
#define BOOT_CPUCACHE_ENTRIES	1
struct arraycache_init {
	struct array_cache cache;
	void *entries[BOOT_CPUCACHE_ENTRIES];
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_AC MAX_NUMNODES
#define	SIZE_NODE (2 * MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_cache_node *n, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node);
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
static void cache_reap(struct work_struct *unused);

static int slab_early_init = 1;

#define INDEX_AC kmalloc_index(sizeof(struct arraycache_init))
#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))

static void kmem_cache_node_init(struct kmem_cache_node *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&(cachep->node[nodeid]->slab), listp);	\
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

#define CFLGS_OFF_SLAB		(0x80000000UL)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps means a lower probability of unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_AC		(2*HZ)
#define REAPTIMEOUT_NODE	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
#define	STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
#define	STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
#define	STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG

/*
 * memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 * 		the end of an object is aligned with the end of the real
 * 		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 * 		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->size - 1* BYTES_PER_WORD: last caller address
 *					[BYTES_PER_WORD long]
 */
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long long*) (objp + obj_offset(cachep) -
				      sizeof(unsigned long long));
}

static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long long *)(objp + cachep->size -
					      sizeof(unsigned long long) -
					      REDZONE_ALIGN);
	return (unsigned long long *) (objp + cachep->size -
				       sizeof(unsigned long long));
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

#define OBJECT_FREE (0)
#define OBJECT_ACTIVE (1)

#ifdef CONFIG_DEBUG_SLAB_LEAK

static void set_obj_status(struct page *page, int idx, int val)
{
	int freelist_size;
	char *status;
	struct kmem_cache *cachep = page->slab_cache;

	freelist_size = cachep->num * sizeof(freelist_idx_t);
	status = (char *)page->freelist + freelist_size;
	status[idx] = val;
}

static inline unsigned int get_obj_status(struct page *page, int idx)
{
	int freelist_size;
	char *status;
	struct kmem_cache *cachep = page->slab_cache;

	freelist_size = cachep->num * sizeof(freelist_idx_t);
	status = (char *)page->freelist + freelist_size;

	return status[idx];
}

#else
static inline void set_obj_status(struct page *page, int idx, int val) {}

#endif

/*
 * Do not go above this order unless 0 objects fit into the slab or
 * overridden on the command line.
 */
#define	SLAB_MAX_ORDER_HI	1
#define	SLAB_MAX_ORDER_LO	0
static int slab_max_order = SLAB_MAX_ORDER_LO;
static bool slab_max_order_set __initdata;

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_head_page(obj);
	return page->slab_cache;
}

static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
				 unsigned int idx)
{
	return page->s_mem + cache->size * idx;
}

/*
 * We want to avoid an expensive divide : (offset / cache->size)
 *   Using the fact that size is a constant for a particular cache,
 *   we can replace (offset / cache->size) by
 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct page *page, void *obj)
{
	u32 offset = (obj - page->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
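
/*
 * Illustrative numbers for the reciprocal trick above (a 256-byte object
 * size is only an example): reciprocal_buffer_size is derived from
 * cache->size via reciprocal_value() when the cache is set up, so
 *
 *	reciprocal_divide(offset, cache->reciprocal_buffer_size)
 *		== offset / cache->size		(e.g. 1280 / 256 == 5)
 *
 * for the offsets that occur within a slab, trading the divide for a
 * multiply and a shift.
 */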

static struct arraycache_init initarray_generic =
    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };

/* internal cache of cache description objs */
static struct kmem_cache kmem_cache_boot = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.size = sizeof(struct kmem_cache),
	.name = "kmem_cache",
};

#define BAD_ALIEN_MAGIC 0x01020304ul

#ifdef CONFIG_LOCKDEP

/*
 * Slab sometimes uses the kmalloc slabs to store the slab headers
 * for other slabs "off slab".
 * The locking for this is tricky in that it nests within the locks
 * of all other slabs in a few places; to deal with this special
 * locking we put on-slab caches into a separate lock-class.
 *
 * We set lock class for alien array caches which are up during init.
 * The lock annotation will be lost if all cpus of a node goes down and
 * then comes back up during hotplug
 */
static struct lock_class_key on_slab_l3_key;
static struct lock_class_key on_slab_alc_key;

static struct lock_class_key debugobj_l3_key;
static struct lock_class_key debugobj_alc_key;

static void slab_set_lock_classes(struct kmem_cache *cachep,
		struct lock_class_key *l3_key, struct lock_class_key *alc_key,
		int q)
{
	struct array_cache **alc;
	struct kmem_cache_node *n;
	int r;

	n = cachep->node[q];
	if (!n)
		return;

	lockdep_set_class(&n->list_lock, l3_key);
	alc = n->alien;
	/*
	 * FIXME: This check for BAD_ALIEN_MAGIC
	 * should go away when common slab code is taught to
	 * work even without alien caches.
	 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
	 * for alloc_alien_cache,
	 */
	if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
		return;
	for_each_node(r) {
		if (alc[r])
			lockdep_set_class(&alc[r]->lock, alc_key);
	}
}

static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
{
	slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, node);
}

static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
{
	int node;

	for_each_online_node(node)
		slab_set_debugobj_lock_classes_node(cachep, node);
}

static void init_node_lock_keys(int q)
{
	int i;

	if (slab_state < UP)
		return;

	for (i = 1; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache_node *n;
		struct kmem_cache *cache = kmalloc_caches[i];

		if (!cache)
			continue;

		n = cache->node[q];
		if (!n || OFF_SLAB(cache))
			continue;

		slab_set_lock_classes(cache, &on_slab_l3_key,
				&on_slab_alc_key, q);
	}
}

static void on_slab_lock_classes_node(struct kmem_cache *cachep, int q)
{
	if (!cachep->node[q])
		return;

	slab_set_lock_classes(cachep, &on_slab_l3_key,
			&on_slab_alc_key, q);
}

static inline void on_slab_lock_classes(struct kmem_cache *cachep)
{
	int node;

	VM_BUG_ON(OFF_SLAB(cachep));
	for_each_node(node)
		on_slab_lock_classes_node(cachep, node);
}

static inline void init_lock_keys(void)
{
	int node;

	for_each_node(node)
		init_node_lock_keys(node);
}
#else
static void init_node_lock_keys(int q)
{
}

static inline void init_lock_keys(void)
{
}

static inline void on_slab_lock_classes(struct kmem_cache *cachep)
{
}

static inline void on_slab_lock_classes_node(struct kmem_cache *cachep, int node)
{
}

static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
{
}

static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
{
}
#endif

static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return cachep->array[smp_processor_id()];
}

static size_t calculate_freelist_size(int nr_objs, size_t align)
{
	size_t freelist_size;

	freelist_size = nr_objs * sizeof(freelist_idx_t);
	if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
		freelist_size += nr_objs * sizeof(char);

	if (align)
		freelist_size = ALIGN(freelist_size, align);

	return freelist_size;
}

static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
				size_t idx_size, size_t align)
{
	int nr_objs;
	size_t remained_size;
	size_t freelist_size;
	int extra_space = 0;

	if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
		extra_space = sizeof(char);
	/*
	 * Ignore padding for the initial guess. The padding
	 * is at most @align-1 bytes, and @buffer_size is at
	 * least @align. In the worst case, this result will
	 * be one greater than the number of objects that fit
	 * into the memory allocation when taking the padding
	 * into account.
	 */
	nr_objs = slab_size / (buffer_size + idx_size + extra_space);

	/*
	 * This calculated number will be either the right
	 * amount, or one greater than what we want.
	 */
	remained_size = slab_size - nr_objs * buffer_size;
	freelist_size = calculate_freelist_size(nr_objs, align);
	if (remained_size < freelist_size)
		nr_objs--;

	return nr_objs;
}

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static void cache_estimate(unsigned long gfporder, size_t buffer_size,
			   size_t align, int flags, size_t *left_over,
			   unsigned int *num)
{
	int nr_objs;
	size_t mgmt_size;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - One unsigned int for each object
	 * - Padding to respect alignment of @align
	 * - @buffer_size bytes for each object
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & CFLGS_OFF_SLAB) {
		mgmt_size = 0;
		nr_objs = slab_size / buffer_size;

	} else {
		nr_objs = calculate_nr_objs(slab_size, buffer_size,
					sizeof(freelist_idx_t), align);
		mgmt_size = calculate_freelist_size(nr_objs, align);
	}
	*num = nr_objs;
	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
}
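
/*
 * Worked example with assumed numbers (on-slab management, 4096-byte
 * slab, 256-byte objects, one-byte freelist_idx_t, no DEBUG_SLAB_LEAK,
 * 8-byte freelist alignment): calculate_nr_objs() gives 4096 / 257 = 15
 * objects, the freelist takes ALIGN(15, 8) = 16 bytes, so *num = 15 and
 * *left_over = 4096 - 15*256 - 16 = 240 bytes, later usable for colouring.
 */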

#if DEBUG
#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
{
	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
#endif

/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line
  */

static int use_alien_caches __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
	use_alien_caches = 0;
	return 1;
}
__setup("noaliencache", noaliencache_setup);

static int __init slab_max_order_setup(char *str)
{
	get_option(&str, &slab_max_order);
	slab_max_order = slab_max_order < 0 ? 0 :
				min(slab_max_order, MAX_ORDER - 1);
	slab_max_order_set = true;

	return 1;
}
__setup("slab_max_order=", slab_max_order_setup);

#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, slab_reap_node);

static void init_reap_node(int cpu)
{
	int node;

	node = next_node(cpu_to_mem(cpu), node_online_map);
	if (node == MAX_NUMNODES)
		node = first_node(node_online_map);

	per_cpu(slab_reap_node, cpu) = node;
}

static void next_reap_node(void)
{
	int node = __this_cpu_read(slab_reap_node);

	node = next_node(node, node_online_map);
	if (unlikely(node >= MAX_NUMNODES))
		node = first_node(node_online_map);
	__this_cpu_write(slab_reap_node, node);
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);

	/*
	 * When this gets called from do_initcalls via cpucache_init(),
	 * init_workqueues() has already run, so keventd will be setup
	 * at that time.
	 */
	if (keventd_up() && reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount, gfp_t gfp)
{
	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *nc = NULL;

	nc = kmalloc_node(memsize, gfp, node);
	/*
	 * The array_cache structures contain pointers to free object.
	 * However, when such objects are allocated or transferred to another
	 * cache the pointers are not cleared and they could be counted as
	 * valid references during a kmemleak scan. Therefore, kmemleak must
	 * not scan such objects.
	 */
	kmemleak_no_scan(nc);
	if (nc) {
		nc->avail = 0;
		nc->limit = entries;
		nc->batchcount = batchcount;
		nc->touched = 0;
		spin_lock_init(&nc->lock);
	}
	return nc;
}

static inline bool is_slab_pfmemalloc(struct page *page)
{
	return PageSlabPfmemalloc(page);
}

/* Clears pfmemalloc_active if no slabs have pfmemalloc set */
static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
						struct array_cache *ac)
{
	struct kmem_cache_node *n = cachep->node[numa_mem_id()];
	struct page *page;
	unsigned long flags;

	if (!pfmemalloc_active)
		return;

	spin_lock_irqsave(&n->list_lock, flags);
	list_for_each_entry(page, &n->slabs_full, lru)
		if (is_slab_pfmemalloc(page))
			goto out;

	list_for_each_entry(page, &n->slabs_partial, lru)
		if (is_slab_pfmemalloc(page))
			goto out;

	list_for_each_entry(page, &n->slabs_free, lru)
		if (is_slab_pfmemalloc(page))
			goto out;

	pfmemalloc_active = false;
out:
	spin_unlock_irqrestore(&n->list_lock, flags);
}

static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
						gfp_t flags, bool force_refill)
{
	int i;
	void *objp = ac->entry[--ac->avail];

	/* Ensure the caller is allowed to use objects from PFMEMALLOC slab */
	if (unlikely(is_obj_pfmemalloc(objp))) {
		struct kmem_cache_node *n;

		if (gfp_pfmemalloc_allowed(flags)) {
			clear_obj_pfmemalloc(&objp);
			return objp;
		}

		/* The caller cannot use PFMEMALLOC objects, find another one */
		for (i = 0; i < ac->avail; i++) {
			/* If a !PFMEMALLOC object is found, swap them */
			if (!is_obj_pfmemalloc(ac->entry[i])) {
				objp = ac->entry[i];
				ac->entry[i] = ac->entry[ac->avail];
				ac->entry[ac->avail] = objp;
				return objp;
			}
		}

		/*
		 * If there are empty slabs on the slabs_free list and we are
		 * being forced to refill the cache, mark this one !pfmemalloc.
		 */
		n = cachep->node[numa_mem_id()];
		if (!list_empty(&n->slabs_free) && force_refill) {
			struct page *page = virt_to_head_page(objp);
			ClearPageSlabPfmemalloc(page);
			clear_obj_pfmemalloc(&objp);
			recheck_pfmemalloc_active(cachep, ac);
			return objp;
		}

		/* No !PFMEMALLOC objects available */
		ac->avail++;
		objp = NULL;
	}

	return objp;
}

static inline void *ac_get_obj(struct kmem_cache *cachep,
			struct array_cache *ac, gfp_t flags, bool force_refill)
{
	void *objp;

	if (unlikely(sk_memalloc_socks()))
		objp = __ac_get_obj(cachep, ac, flags, force_refill);
	else
		objp = ac->entry[--ac->avail];

	return objp;
}

static void *__ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
								void *objp)
{
	if (unlikely(pfmemalloc_active)) {
		/* Some pfmemalloc slabs exist, check if this is one */
		struct page *page = virt_to_head_page(objp);
		if (PageSlabPfmemalloc(page))
			set_obj_pfmemalloc(&objp);
	}

	return objp;
}

static inline void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
								void *objp)
{
	if (unlikely(sk_memalloc_socks()))
		objp = __ac_put_obj(cachep, ac, objp);

	ac->entry[ac->avail++] = objp;
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min3(from->avail, max, to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail -nr,
			sizeof(void *) *nr);

	from->avail -= nr;
	to->avail += nr;
	return nr;
}

#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, n) do { } while (0)

static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
	return (struct array_cache **)BAD_ALIEN_MAGIC;
}

static inline void free_alien_cache(struct array_cache **ac_ptr)
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

static inline void *____cache_alloc_node(struct kmem_cache *cachep,
		 gfp_t flags, int nodeid)
{
	return NULL;
}

#else	/* CONFIG_NUMA */

static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
	struct array_cache **ac_ptr;
	int memsize = sizeof(void *) * nr_node_ids;
	int i;

	if (limit > 1)
		limit = 12;
	ac_ptr = kzalloc_node(memsize, gfp, node);
	if (ac_ptr) {
		for_each_node(i) {
			if (i == node || !node_online(i))
				continue;
			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
			if (!ac_ptr[i]) {
				for (i--; i >= 0; i--)
					kfree(ac_ptr[i]);
				kfree(ac_ptr);
				return NULL;
			}
		}
	}
	return ac_ptr;
}

static void free_alien_cache(struct array_cache **ac_ptr)
{
	int i;

	if (!ac_ptr)
		return;
	for_each_node(i)
	    kfree(ac_ptr[i]);
	kfree(ac_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node)
{
	struct kmem_cache_node *n = cachep->node[node];

	if (ac->avail) {
		spin_lock(&n->list_lock);
		/*
		 * Stuff objects into the remote nodes shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		if (n->shared)
			transfer_objects(n->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node);
		ac->avail = 0;
		spin_unlock(&n->list_lock);
	}
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
{
	int node = __this_cpu_read(slab_reap_node);

	if (n->alien) {
		struct array_cache *ac = n->alien[node];

		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
			__drain_alien_cache(cachep, ac, node);
			spin_unlock_irq(&ac->lock);
		}
	}
}

static void drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache **alien)
{
	int i = 0;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		ac = alien[i];
		if (ac) {
			spin_lock_irqsave(&ac->lock, flags);
			__drain_alien_cache(cachep, ac, i);
			spin_unlock_irqrestore(&ac->lock, flags);
		}
	}
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	int nodeid = page_to_nid(virt_to_page(objp));
	struct kmem_cache_node *n;
	struct array_cache *alien = NULL;
	int node;

	node = numa_mem_id();

	/*
	 * Make sure we are not freeing an object from another node to the array
	 * cache on this cpu.
	 */
	if (likely(nodeid == node))
		return 0;

	n = cachep->node[node];
	STATS_INC_NODEFREES(cachep);
	if (n->alien && n->alien[nodeid]) {
		alien = n->alien[nodeid];
		spin_lock(&alien->lock);
		if (unlikely(alien->avail == alien->limit)) {
			STATS_INC_ACOVERFLOW(cachep);
			__drain_alien_cache(cachep, alien, nodeid);
		}
		ac_put_obj(cachep, alien, objp);
		spin_unlock(&alien->lock);
	} else {
		spin_lock(&(cachep->node[nodeid])->list_lock);
		free_block(cachep, &objp, 1, nodeid);
		spin_unlock(&(cachep->node[nodeid])->list_lock);
	}
	return 1;
}
#endif

/*
 * Allocates and initializes node for a node on each slab cache, used for
 * either memory or cpu hotplug.  If memory is being hot-added, the kmem_cache_node
 * will be allocated off-node since memory is not yet online for the new node.
 * When hotplugging memory or a cpu, existing nodes are not replaced if
 * already in use.
 *
 * Must hold slab_mutex.
 */
static int init_cache_node_node(int node)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *n;
	const int memsize = sizeof(struct kmem_cache_node);

	list_for_each_entry(cachep, &slab_caches, list) {
		/*
		 * Set up the kmem_cache_node for cpu before we can
		 * begin anything. Make sure some other cpu on this
		 * node has not already allocated this
		 */
		if (!cachep->node[node]) {
			n = kmalloc_node(memsize, GFP_KERNEL, node);
			if (!n)
				return -ENOMEM;
			kmem_cache_node_init(n);
			n->next_reap = jiffies + REAPTIMEOUT_NODE +
			    ((unsigned long)cachep) % REAPTIMEOUT_NODE;

			/*
			 * The kmem_cache_nodes don't come and go as CPUs
			 * come and go.  slab_mutex is sufficient
			 * protection here.
			 */
			cachep->node[node] = n;
		}

		spin_lock_irq(&cachep->node[node]->list_lock);
		cachep->node[node]->free_limit =
			(1 + nr_cpus_node(node)) *
			cachep->batchcount + cachep->num;
		spin_unlock_irq(&cachep->node[node]->list_lock);
	}
	return 0;
}

static inline int slabs_tofree(struct kmem_cache *cachep,
						struct kmem_cache_node *n)
{
	return (n->free_objects + cachep->num - 1) / cachep->num;
}
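/* E.g. 25 free objects with 8 objects per slab rounds up to 4 slabs. */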

static void cpuup_canceled(long cpu)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *n = NULL;
	int node = cpu_to_mem(cpu);
	const struct cpumask *mask = cpumask_of_node(node);

	list_for_each_entry(cachep, &slab_caches, list) {
		struct array_cache *nc;
		struct array_cache *shared;
		struct array_cache **alien;

		/* cpu is dead; no one can alloc from it. */
		nc = cachep->array[cpu];
		cachep->array[cpu] = NULL;
		n = cachep->node[node];

		if (!n)
			goto free_array_cache;

		spin_lock_irq(&n->list_lock);

		/* Free limit for this kmem_cache_node */
		n->free_limit -= cachep->batchcount;
		if (nc)
			free_block(cachep, nc->entry, nc->avail, node);

		if (!cpumask_empty(mask)) {
			spin_unlock_irq(&n->list_lock);
			goto free_array_cache;
		}

		shared = n->shared;
		if (shared) {
			free_block(cachep, shared->entry,
				   shared->avail, node);
			n->shared = NULL;
		}

		alien = n->alien;
		n->alien = NULL;

		spin_unlock_irq(&n->list_lock);

		kfree(shared);
		if (alien) {
			drain_alien_cache(cachep, alien);
			free_alien_cache(alien);
		}
free_array_cache:
		kfree(nc);
	}
	/*
	 * In the previous loop, all the objects were freed to
	 * the respective cache's slabs,  now we can go ahead and
	 * shrink each nodelist to its limit.
	 */
	list_for_each_entry(cachep, &slab_caches, list) {
		n = cachep->node[node];
		if (!n)
			continue;
		drain_freelist(cachep, n, slabs_tofree(cachep, n));
	}
}

static int cpuup_prepare(long cpu)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *n = NULL;
	int node = cpu_to_mem(cpu);
	int err;

	/*
	 * We need to do this right in the beginning since
	 * alloc_arraycache's are going to use this list.
	 * kmalloc_node allows us to add the slab to the right
	 * kmem_cache_node and not this cpu's kmem_cache_node
	 */
	err = init_cache_node_node(node);
	if (err < 0)
		goto bad;

	/*
	 * Now we can go ahead with allocating the shared arrays and
	 * array caches
	 */
	list_for_each_entry(cachep, &slab_caches, list) {
		struct array_cache *nc;
		struct array_cache *shared = NULL;
		struct array_cache **alien = NULL;

		nc = alloc_arraycache(node, cachep->limit,
					cachep->batchcount, GFP_KERNEL);
		if (!nc)
			goto bad;
		if (cachep->shared) {
			shared = alloc_arraycache(node,
				cachep->shared * cachep->batchcount,
				0xbaadf00d, GFP_KERNEL);
			if (!shared) {
				kfree(nc);
				goto bad;
			}
		}
		if (use_alien_caches) {
			alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL);
			if (!alien) {
				kfree(shared);
				kfree(nc);
				goto bad;
			}
		}
		cachep->array[cpu] = nc;
		n = cachep->node[node];
		BUG_ON(!n);

		spin_lock_irq(&n->list_lock);
		if (!n->shared) {
			/*
			 * We are serialised from CPU_DEAD or
			 * CPU_UP_CANCELLED by the cpucontrol lock
			 */
			n->shared = shared;
			shared = NULL;
		}
#ifdef CONFIG_NUMA
		if (!n->alien) {
			n->alien = alien;
			alien = NULL;
		}
#endif
		spin_unlock_irq(&n->list_lock);
		kfree(shared);
		free_alien_cache(alien);
		if (cachep->flags & SLAB_DEBUG_OBJECTS)
			slab_set_debugobj_lock_classes_node(cachep, node);
		else if (!OFF_SLAB(cachep) &&
			 !(cachep->flags & SLAB_DESTROY_BY_RCU))
			on_slab_lock_classes_node(cachep, node);
	}
	init_node_lock_keys(node);

	return 0;
bad:
	cpuup_canceled(cpu);
	return -ENOMEM;
}

static int cpuup_callback(struct notifier_block *nfb,
				    unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	int err = 0;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		mutex_lock(&slab_mutex);
		err = cpuup_prepare(cpu);
		mutex_unlock(&slab_mutex);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		start_cpu_timer(cpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
  	case CPU_DOWN_PREPARE:
  	case CPU_DOWN_PREPARE_FROZEN:
		/*
		 * Shutdown cache reaper. Note that the slab_mutex is
		 * held so that if cache_reap() is invoked it cannot do
		 * anything expensive but will only modify reap_work
		 * and reschedule the timer.
		*/
		cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
		/* Now the cache_reaper is guaranteed to be not running. */
		per_cpu(slab_reap_work, cpu).work.func = NULL;
  		break;
  	case CPU_DOWN_FAILED:
  	case CPU_DOWN_FAILED_FROZEN:
		start_cpu_timer(cpu);
  		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/*
		 * Even if all the cpus of a node are down, we don't free the
		 * kmem_cache_node of any cache. This is to avoid a race between
		 * cpu_down, and a kmalloc allocation from another cpu for
		 * memory from the node of the cpu going down.  The node
		 * structure is usually allocated from kmem_cache_create() and
		 * gets destroyed at kmem_cache_destroy().
		 */
		/* fall through */
#endif
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		mutex_lock(&slab_mutex);
		cpuup_canceled(cpu);
		mutex_unlock(&slab_mutex);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block cpucache_notifier = {
	&cpuup_callback, NULL, 0
};

#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
/*
 * Drains freelist for a node on each slab cache, used for memory hot-remove.
 * Returns -EBUSY if all objects cannot be drained so that the node is not
 * removed.
 *
 * Must hold slab_mutex.
 */
static int __meminit drain_cache_node_node(int node)
{
	struct kmem_cache *cachep;
	int ret = 0;

	list_for_each_entry(cachep, &slab_caches, list) {
		struct kmem_cache_node *n;

		n = cachep->node[node];
		if (!n)
			continue;

		drain_freelist(cachep, n, slabs_tofree(cachep, n));

		if (!list_empty(&n->slabs_full) ||
		    !list_empty(&n->slabs_partial)) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}

static int __meminit slab_memory_callback(struct notifier_block *self,
					unsigned long action, void *arg)
{
	struct memory_notify *mnb = arg;
	int ret = 0;
	int nid;

	nid = mnb->status_change_nid;
	if (nid < 0)
		goto out;

	switch (action) {
	case MEM_GOING_ONLINE:
		mutex_lock(&slab_mutex);
		ret = init_cache_node_node(nid);
		mutex_unlock(&slab_mutex);
		break;
	case MEM_GOING_OFFLINE:
		mutex_lock(&slab_mutex);
		ret = drain_cache_node_node(nid);
		mutex_unlock(&slab_mutex);
		break;
	case MEM_ONLINE:
	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}
out:
	return notifier_from_errno(ret);
}
#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */

/*
 * swap the static kmem_cache_node with kmalloced memory
 */
static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
				int nodeid)
{
	struct kmem_cache_node *ptr;

	ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
	BUG_ON(!ptr);

	memcpy(ptr, list, sizeof(struct kmem_cache_node));
	/*
	 * Do not assume that spinlocks can be initialized via memcpy:
	 */
	spin_lock_init(&ptr->list_lock);

	MAKE_ALL_LISTS(cachep, ptr, nodeid);
	cachep->node[nodeid] = ptr;
}

/*
 * For setting up all the kmem_cache_node for a cache whose buffer_size is the
 * same as the size of kmem_cache_node.
 */
static void __init set_up_node(struct kmem_cache *cachep, int index)
{
	int node;

	for_each_online_node(node) {
		cachep->node[node] = &init_kmem_cache_node[index + node];
		cachep->node[node]->next_reap = jiffies +
		    REAPTIMEOUT_NODE +
		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
	}
}

/*
 * The memory after the last cpu cache pointer is used for the
 * node pointer.
 */
static void setup_node_pointer(struct kmem_cache *cachep)
{
	cachep->node = (struct kmem_cache_node **)&cachep->array[nr_cpu_ids];
}

/*
 * Initialisation.  Called after the page allocator have been initialised and
 * before smp_init().
 */
void __init kmem_cache_init(void)
{
	int i;

	BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
					sizeof(struct rcu_head));
	kmem_cache = &kmem_cache_boot;
	setup_node_pointer(kmem_cache);

	if (num_possible_nodes() == 1)
		use_alien_caches = 0;

	for (i = 0; i < NUM_INIT_LISTS; i++)
		kmem_cache_node_init(&init_kmem_cache_node[i]);

	set_up_node(kmem_cache, CACHE_CACHE);

	/*
	 * Fragmentation resistance on low memory - only use bigger
	 * page orders on machines with more than 32MB of memory if
	 * not overridden on the command line.
	 */
	if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
		slab_max_order = SLAB_MAX_ORDER_HI;

	/* Bootstrap is tricky, because several objects are allocated
	 * from caches that do not exist yet:
	 * 1) initialize the kmem_cache cache: it contains the struct
	 *    kmem_cache structures of all caches, except kmem_cache itself:
	 *    kmem_cache is statically allocated.
	 *    Initially an __init data area is used for the head array and the
	 *    kmem_cache_node structures, it's replaced with a kmalloc allocated
	 *    array at the end of the bootstrap.
	 * 2) Create the first kmalloc cache.
	 *    The struct kmem_cache for the new cache is allocated normally.
	 *    An __init data area is used for the head array.
	 * 3) Create the remaining kmalloc caches, with minimally sized
	 *    head arrays.
	 * 4) Replace the __init data head arrays for kmem_cache and the first
	 *    kmalloc cache with kmalloc allocated arrays.
	 * 5) Replace the __init data for kmem_cache_node for kmem_cache and
	 *    the other caches with kmalloc allocated memory.
	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
	 */

	/* 1) create the kmem_cache */

	/*
	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
	 */
	create_boot_cache(kmem_cache, "kmem_cache",
		offsetof(struct kmem_cache, array[nr_cpu_ids]) +
				  nr_node_ids * sizeof(struct kmem_cache_node *),
				  SLAB_HWCACHE_ALIGN);
	list_add(&kmem_cache->list, &slab_caches);

	/* 2+3) create the kmalloc caches */

	/*
	 * Initialize the caches that provide memory for the array cache and the
	 * kmem_cache_node structures first.  Without this, further allocations will
	 * bug.
	 */

	kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
					kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);

	if (INDEX_AC != INDEX_NODE)
		kmalloc_caches[INDEX_NODE] =
			create_kmalloc_cache("kmalloc-node",
				kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);

	slab_early_init = 0;

	/* 4) Replace the bootstrap head arrays */
	{
		struct array_cache *ptr;

		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);

		memcpy(ptr, cpu_cache_get(kmem_cache),
		       sizeof(struct arraycache_init));
		/*
		 * Do not assume that spinlocks can be initialized via memcpy:
		 */
		spin_lock_init(&ptr->lock);

		kmem_cache->array[smp_processor_id()] = ptr;

		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);

		BUG_ON(cpu_cache_get(kmalloc_caches[INDEX_AC])
		       != &initarray_generic.cache);
		memcpy(ptr, cpu_cache_get(kmalloc_caches[INDEX_AC]),
		       sizeof(struct arraycache_init));
		/*
		 * Do not assume that spinlocks can be initialized via memcpy:
		 */
		spin_lock_init(&ptr->lock);

		kmalloc_caches[INDEX_AC]->array[smp_processor_id()] = ptr;
	}
	/* 5) Replace the bootstrap kmem_cache_node */
	{
		int nid;

		for_each_online_node(nid) {
			init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);

			init_list(kmalloc_caches[INDEX_AC],
				  &init_kmem_cache_node[SIZE_AC + nid], nid);

			if (INDEX_AC != INDEX_NODE) {
				init_list(kmalloc_caches[INDEX_NODE],
					  &init_kmem_cache_node[SIZE_NODE + nid], nid);
			}
		}
	}

	create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
}

void __init kmem_cache_init_late(void)
{
	struct kmem_cache *cachep;

	slab_state = UP;

	/* 6) resize the head arrays to their final sizes */
	mutex_lock(&slab_mutex);
	list_for_each_entry(cachep, &slab_caches, list)
		if (enable_cpucache(cachep, GFP_NOWAIT))
			BUG();
	mutex_unlock(&slab_mutex);

	/* Annotate slab for lockdep -- annotate the malloc caches */
	init_lock_keys();

	/* Done! */
	slab_state = FULL;

	/*
	 * Register a cpu startup notifier callback that initializes
	 * cpu_cache_get for all new cpus
	 */
	register_cpu_notifier(&cpucache_notifier);

#ifdef CONFIG_NUMA
	/*
	 * Register a memory hotplug callback that initializes and frees
	 * node.
	 */
	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
#endif

	/*
	 * The reap timers are started later, with a module init call: That part
	 * of the kernel is not yet operational.
	 */
}

static int __init cpucache_init(void)
{
	int cpu;

	/*
	 * Register the timers that return unneeded pages to the page allocator
	 */
	for_each_online_cpu(cpu)
		start_cpu_timer(cpu);

	/* Done! */
	slab_state = FULL;
	return 0;
}
__initcall(cpucache_init);

static noinline void
slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
{
#if DEBUG
	struct kmem_cache_node *n;
	struct page *page;
	unsigned long flags;
	int node;
	static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs))
		return;

	printk(KERN_WARNING
		"SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n",
		nodeid, gfpflags);
	printk(KERN_WARNING "  cache: %s, object size: %d, order: %d\n",
		cachep->name, cachep->size, cachep->gfporder);

	for_each_online_node(node) {
		unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
		unsigned long active_slabs = 0, num_slabs = 0;

		n = cachep->node[node];
		if (!n)
			continue;

		spin_lock_irqsave(&n->list_lock, flags);
		list_for_each_entry(page, &n->slabs_full, lru) {
			active_objs += cachep->num;
			active_slabs++;
		}
		list_for_each_entry(page, &n->slabs_partial, lru) {
			active_objs += page->active;
			active_slabs++;
		}
		list_for_each_entry(page, &n->slabs_free, lru)
			num_slabs++;

		free_objects += n->free_objects;
		spin_unlock_irqrestore(&n->list_lock, flags);

		num_slabs += active_slabs;
		num_objs = num_slabs * cachep->num;
		printk(KERN_WARNING
			"  node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
			node, active_slabs, num_slabs, active_objs, num_objs,
			free_objects);
	}
#endif
}

/*
 * Interface to system's page allocator. No need to hold the cache-lock.
 *
 * If we requested dmaable memory, we will get it. Even if we
 * did not request dmaable memory, we might get it, but that
 * would be relatively rare and ignorable.
 */
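/*
 * Clarifying note (added): besides allocating the pages, kmem_getpages()
 * charges them to the memcg and to the NR_SLAB_RECLAIMABLE /
 * NR_SLAB_UNRECLAIMABLE vmstat counters and marks them PageSlab;
 * kmem_freepages() below undoes all of that.
 */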
static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
								int nodeid)
{
	struct page *page;
	int nr_pages;

	flags |= cachep->allocflags;
	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		flags |= __GFP_RECLAIMABLE;

	if (memcg_charge_slab(cachep, flags, cachep->gfporder))
		return NULL;

	page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
	if (!page) {
		memcg_uncharge_slab(cachep, cachep->gfporder);
		slab_out_of_memory(cachep, flags, nodeid);
		return NULL;
	}

	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
	if (unlikely(page->pfmemalloc))
		pfmemalloc_active = true;

	nr_pages = (1 << cachep->gfporder);
	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		add_zone_page_state(page_zone(page),
			NR_SLAB_RECLAIMABLE, nr_pages);
	else
		add_zone_page_state(page_zone(page),
			NR_SLAB_UNRECLAIMABLE, nr_pages);
	__SetPageSlab(page);
	if (page->pfmemalloc)
		SetPageSlabPfmemalloc(page);

	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);

		if (cachep->ctor)
			kmemcheck_mark_uninitialized_pages(page, nr_pages);
		else
			kmemcheck_mark_unallocated_pages(page, nr_pages);
	}

	return page;
}

/*
 * Interface to system's page release.
 */
static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
{
	const unsigned long nr_freed = (1 << cachep->gfporder);

	kmemcheck_free_shadow(page, cachep->gfporder);

	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		sub_zone_page_state(page_zone(page),
				NR_SLAB_RECLAIMABLE, nr_freed);
	else
		sub_zone_page_state(page_zone(page),
				NR_SLAB_UNRECLAIMABLE, nr_freed);

	BUG_ON(!PageSlab(page));
	__ClearPageSlabPfmemalloc(page);
	__ClearPageSlab(page);
	page_mapcount_reset(page);
	page->mapping = NULL;

	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += nr_freed;
	__free_pages(page, cachep->gfporder);
	memcg_uncharge_slab(cachep, cachep->gfporder);
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct kmem_cache *cachep;
	struct page *page;

	page = container_of(head, struct page, rcu_head);
	cachep = page->slab_cache;

	kmem_freepages(cachep, page);
}

#if DEBUG

#ifdef CONFIG_DEBUG_PAGEALLOC
1822
static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
P
Pekka Enberg 已提交
1823
			    unsigned long caller)
L
Linus Torvalds 已提交
1824
{
1825
	int size = cachep->object_size;
L
Linus Torvalds 已提交
1826

1827
	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
L
Linus Torvalds 已提交
1828

P
Pekka Enberg 已提交
1829
	if (size < 5 * sizeof(unsigned long))
L
Linus Torvalds 已提交
1830 1831
		return;

P
Pekka Enberg 已提交
1832 1833 1834 1835
	*addr++ = 0x12345678;
	*addr++ = caller;
	*addr++ = smp_processor_id();
	size -= 3 * sizeof(unsigned long);
L
Linus Torvalds 已提交
1836 1837 1838 1839 1840 1841 1842
	{
		unsigned long *sptr = &caller;
		unsigned long svalue;

		while (!kstack_end(sptr)) {
			svalue = *sptr++;
			if (kernel_text_address(svalue)) {
P
Pekka Enberg 已提交
1843
				*addr++ = svalue;
L
Linus Torvalds 已提交
1844 1845 1846 1847 1848 1849 1850
				size -= sizeof(unsigned long);
				if (size <= sizeof(unsigned long))
					break;
			}
		}

	}
P
Pekka Enberg 已提交
1851
	*addr++ = 0x87654321;
L
Linus Torvalds 已提交
1852 1853 1854
}
#endif

1855
static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
L
Linus Torvalds 已提交
1856
{
1857
	int size = cachep->object_size;
1858
	addr = &((char *)addr)[obj_offset(cachep)];
L
Linus Torvalds 已提交
1859 1860

	memset(addr, val, size);
P
Pekka Enberg 已提交
1861
	*(unsigned char *)(addr + size - 1) = POISON_END;
L
Linus Torvalds 已提交
1862 1863 1864 1865 1866
}

static void dump_line(char *data, int offset, int limit)
{
	int i;
D
Dave Jones 已提交
1867 1868 1869
	unsigned char error = 0;
	int bad_count = 0;

1870
	printk(KERN_ERR "%03x: ", offset);
D
Dave Jones 已提交
1871 1872 1873 1874 1875 1876
	for (i = 0; i < limit; i++) {
		if (data[offset + i] != POISON_FREE) {
			error = data[offset + i];
			bad_count++;
		}
	}
1877 1878
	print_hex_dump(KERN_CONT, "", 0, 16, 1,
			&data[offset], limit, 1);
D
Dave Jones 已提交
1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892

	if (bad_count == 1) {
		error ^= POISON_FREE;
		if (!(error & (error - 1))) {
			printk(KERN_ERR "Single bit error detected. Probably "
					"bad RAM.\n");
#ifdef CONFIG_X86
			printk(KERN_ERR "Run memtest86+ or a similar memory "
					"test tool.\n");
#else
			printk(KERN_ERR "Run a memory test tool.\n");
#endif
		}
	}
L
Linus Torvalds 已提交
1893 1894 1895 1896 1897
}
#endif

#if DEBUG

1898
static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
L
Linus Torvalds 已提交
1899 1900 1901 1902 1903
{
	int i, size;
	char *realobj;

	if (cachep->flags & SLAB_RED_ZONE) {
1904
		printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
A
Andrew Morton 已提交
1905 1906
			*dbg_redzone1(cachep, objp),
			*dbg_redzone2(cachep, objp));
L
Linus Torvalds 已提交
1907 1908 1909
	}

	if (cachep->flags & SLAB_STORE_USER) {
J
Joe Perches 已提交
1910 1911 1912
		printk(KERN_ERR "Last user: [<%p>](%pSR)\n",
		       *dbg_userword(cachep, objp),
		       *dbg_userword(cachep, objp));
L
Linus Torvalds 已提交
1913
	}
1914
	realobj = (char *)objp + obj_offset(cachep);
1915
	size = cachep->object_size;
P
Pekka Enberg 已提交
1916
	for (i = 0; i < size && lines; i += 16, lines--) {
L
Linus Torvalds 已提交
1917 1918
		int limit;
		limit = 16;
P
Pekka Enberg 已提交
1919 1920
		if (i + limit > size)
			limit = size - i;
L
Linus Torvalds 已提交
1921 1922 1923 1924
		dump_line(realobj, i, limit);
	}
}

1925
static void check_poison_obj(struct kmem_cache *cachep, void *objp)
L
Linus Torvalds 已提交
1926 1927 1928 1929 1930
{
	char *realobj;
	int size, i;
	int lines = 0;

1931
	realobj = (char *)objp + obj_offset(cachep);
1932
	size = cachep->object_size;
L
Linus Torvalds 已提交
1933

P
Pekka Enberg 已提交
1934
	for (i = 0; i < size; i++) {
L
Linus Torvalds 已提交
1935
		char exp = POISON_FREE;
P
Pekka Enberg 已提交
1936
		if (i == size - 1)
L
Linus Torvalds 已提交
1937 1938 1939 1940 1941 1942
			exp = POISON_END;
		if (realobj[i] != exp) {
			int limit;
			/* Mismatch ! */
			/* Print header */
			if (lines == 0) {
P
Pekka Enberg 已提交
1943
				printk(KERN_ERR
1944 1945
					"Slab corruption (%s): %s start=%p, len=%d\n",
					print_tainted(), cachep->name, realobj, size);
L
Linus Torvalds 已提交
1946 1947 1948
				print_objinfo(cachep, objp, 0);
			}
			/* Hexdump the affected line */
P
Pekka Enberg 已提交
1949
			i = (i / 16) * 16;
L
Linus Torvalds 已提交
1950
			limit = 16;
P
Pekka Enberg 已提交
1951 1952
			if (i + limit > size)
				limit = size - i;
L
Linus Torvalds 已提交
1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964
			dump_line(realobj, i, limit);
			i += 16;
			lines++;
			/* Limit to 5 lines */
			if (lines > 5)
				break;
		}
	}
	if (lines != 0) {
		/* Print some data about the neighboring objects, if they
		 * exist:
		 */
1965
		struct page *page = virt_to_head_page(objp);
1966
		unsigned int objnr;
L
Linus Torvalds 已提交
1967

1968
		objnr = obj_to_index(cachep, page, objp);
L
Linus Torvalds 已提交
1969
		if (objnr) {
1970
			objp = index_to_obj(cachep, page, objnr - 1);
1971
			realobj = (char *)objp + obj_offset(cachep);
L
Linus Torvalds 已提交
1972
			printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
P
Pekka Enberg 已提交
1973
			       realobj, size);
L
Linus Torvalds 已提交
1974 1975
			print_objinfo(cachep, objp, 2);
		}
P
Pekka Enberg 已提交
1976
		if (objnr + 1 < cachep->num) {
1977
			objp = index_to_obj(cachep, page, objnr + 1);
1978
			realobj = (char *)objp + obj_offset(cachep);
L
Linus Torvalds 已提交
1979
			printk(KERN_ERR "Next obj: start=%p, len=%d\n",
P
Pekka Enberg 已提交
1980
			       realobj, size);
L
Linus Torvalds 已提交
1981 1982 1983 1984 1985 1986
			print_objinfo(cachep, objp, 2);
		}
	}
}
#endif

1987
#if DEBUG
1988 1989
static void slab_destroy_debugcheck(struct kmem_cache *cachep,
						struct page *page)
L
Linus Torvalds 已提交
1990 1991 1992
{
	int i;
	for (i = 0; i < cachep->num; i++) {
1993
		void *objp = index_to_obj(cachep, page, i);
L
Linus Torvalds 已提交
1994 1995 1996

		if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
1997
			if (cachep->size % PAGE_SIZE == 0 &&
A
Andrew Morton 已提交
1998
					OFF_SLAB(cachep))
P
Pekka Enberg 已提交
1999
				kernel_map_pages(virt_to_page(objp),
2000
					cachep->size / PAGE_SIZE, 1);
L
Linus Torvalds 已提交
2001 2002 2003 2004 2005 2006 2007 2008 2009
			else
				check_poison_obj(cachep, objp);
#else
			check_poison_obj(cachep, objp);
#endif
		}
		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "start of a freed object "
P
Pekka Enberg 已提交
2010
					   "was overwritten");
L
Linus Torvalds 已提交
2011 2012
			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "end of a freed object "
P
Pekka Enberg 已提交
2013
					   "was overwritten");
L
Linus Torvalds 已提交
2014 2015
		}
	}
2016
}
L
Linus Torvalds 已提交
2017
#else
2018 2019
static void slab_destroy_debugcheck(struct kmem_cache *cachep,
						struct page *page)
2020 2021
{
}
L
Linus Torvalds 已提交
2022 2023
#endif

/**
 * slab_destroy - destroy and release all objects in a slab
 * @cachep: cache pointer being destroyed
 * @page: page pointer being destroyed
 *
 * Destroy all the objs in a slab, and release the mem back to the system.
 * Before calling the slab must have been unlinked from the cache.  The
 * cache-lock is not held/needed.
 */
static void slab_destroy(struct kmem_cache *cachep, struct page *page)
{
2035
	void *freelist;
2036

2037 2038
	freelist = page->freelist;
	slab_destroy_debugcheck(cachep, page);
L
Linus Torvalds 已提交
2039
	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
2040 2041 2042 2043 2044 2045 2046 2047 2048 2049
		struct rcu_head *head;

		/*
		 * RCU free overloads the RCU head over the LRU.
		 * slab_page has been overloaded over the LRU,
		 * however it is not used from now on so that
		 * we can use it safely.
		 */
		head = (void *)&page->rcu_head;
		call_rcu(head, kmem_rcu_free);
L
Linus Torvalds 已提交
2050 2051

	} else {
2052
		kmem_freepages(cachep, page);
L
Linus Torvalds 已提交
2053
	}
2054 2055

	/*
2056
	 * From now on, we don't use the freelist,
	 * although the actual page can be freed in RCU context.
	 */
	if (OFF_SLAB(cachep))
2060
		kmem_cache_free(cachep->freelist_cache, freelist);
L
Linus Torvalds 已提交
2061 2062
}

/**
 * calculate_slab_order - calculate size (page order) of slabs
 * @cachep: pointer to the cache that is being created
 * @size: size of objects to be created in this cache.
 * @align: required alignment for the objects.
 * @flags: slab allocation flags
 *
 * Also calculates the number of objects per slab.
 *
 * This could be made much more intelligent.  For now, try to avoid using
 * high order pages for slabs.  When the gfp() functions are more friendly
 * towards high-order requests, this should be changed.
 */
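/*
 * Illustrative note (added, not part of the original source): the loop
 * below accepts a gfporder once the wasted space would be at most 1/8
 * of the slab - e.g. with 4K pages an order-0 slab may leave at most
 * 4096 / 8 = 512 bytes unused.  Whatever is left over is reported back
 * to __kmem_cache_create(), which spends it on slab colouring.
 */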
static size_t calculate_slab_order(struct kmem_cache *cachep,
			size_t size, size_t align, unsigned long flags)
{
2079
	unsigned long offslab_limit;
2080
	size_t left_over = 0;
2081
	int gfporder;
2082

2083
	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
2084 2085 2086
		unsigned int num;
		size_t remainder;

2087
		cache_estimate(gfporder, size, align, flags, &remainder, &num);
2088 2089
		if (!num)
			continue;
2090

2091 2092 2093 2094
		/* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */
		if (num > SLAB_OBJ_MAX_NUM)
			break;

2095
		if (flags & CFLGS_OFF_SLAB) {
2096
			size_t freelist_size_per_obj = sizeof(freelist_idx_t);
2097 2098 2099 2100 2101
			/*
			 * Max number of objs-per-slab for caches which
			 * use off-slab slabs. Needed to avoid a possible
			 * looping condition in cache_grow().
			 */
2102 2103
			if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
				freelist_size_per_obj += sizeof(char);
2104
			offslab_limit = size;
2105
			offslab_limit /= freelist_size_per_obj;
2106 2107 2108 2109

 			if (num > offslab_limit)
				break;
		}
2110

2111
		/* Found something acceptable - save it away */
2112
		cachep->num = num;
2113
		cachep->gfporder = gfporder;
2114 2115
		left_over = remainder;

2116 2117 2118 2119 2120 2121 2122 2123
		/*
		 * A VFS-reclaimable slab tends to have most allocations
		 * as GFP_NOFS and we really don't want to have to be allocating
		 * higher-order pages when we are unable to shrink dcache.
		 */
		if (flags & SLAB_RECLAIM_ACCOUNT)
			break;

2124 2125 2126 2127
		/*
		 * Large number of objects is good, but very large slabs are
		 * currently bad for the gfp()s.
		 */
2128
		if (gfporder >= slab_max_order)
2129 2130
			break;

2131 2132 2133
		/*
		 * Acceptable internal fragmentation?
		 */
A
Andrew Morton 已提交
2134
		if (left_over * 8 <= (PAGE_SIZE << gfporder))
2135 2136 2137 2138 2139
			break;
	}
	return left_over;
}

2140
static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
2141
{
2142
	if (slab_state >= FULL)
2143
		return enable_cpucache(cachep, gfp);
2144

2145
	if (slab_state == DOWN) {
2146
		/*
		 * Note: Creation of first cache (kmem_cache).
		 * The setup_node is taken care
		 * of by the caller of __kmem_cache_create
		 */
		cachep->array[smp_processor_id()] = &initarray_generic.cache;
		slab_state = PARTIAL;
	} else if (slab_state == PARTIAL) {
		/*
		 * Note: the second kmem_cache_create must create the cache
		 * that's used by kmalloc(24), otherwise the creation of
		 * further caches will BUG().
		 */
		cachep->array[smp_processor_id()] = &initarray_generic.cache;

		/*
		 * If the cache that's used by kmalloc(sizeof(kmem_cache_node)) is
		 * the second cache, then we need to set up all its
		 * kmem_cache_node structures, otherwise the creation of further
		 * caches will BUG().
		 */
2166 2167 2168
		set_up_node(cachep, SIZE_AC);
		if (INDEX_AC == INDEX_NODE)
			slab_state = PARTIAL_NODE;
2169
		else
2170
			slab_state = PARTIAL_ARRAYCACHE;
2171
	} else {
2172
		/* Remaining boot caches */
2173
		cachep->array[smp_processor_id()] =
2174
			kmalloc(sizeof(struct arraycache_init), gfp);
2175

2176
		if (slab_state == PARTIAL_ARRAYCACHE) {
2177 2178
			set_up_node(cachep, SIZE_NODE);
			slab_state = PARTIAL_NODE;
2179 2180
		} else {
			int node;
2181
			for_each_online_node(node) {
2182
				cachep->node[node] =
2183
				    kmalloc_node(sizeof(struct kmem_cache_node),
2184
						gfp, node);
2185
				BUG_ON(!cachep->node[node]);
2186
				kmem_cache_node_init(cachep->node[node]);
2187 2188 2189
			}
		}
	}
2190
	cachep->node[numa_mem_id()]->next_reap =
2191 2192
			jiffies + REAPTIMEOUT_NODE +
			((unsigned long)cachep) % REAPTIMEOUT_NODE;
2193 2194 2195 2196 2197 2198 2199

	cpu_cache_get(cachep)->avail = 0;
	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
	cpu_cache_get(cachep)->batchcount = 1;
	cpu_cache_get(cachep)->touched = 0;
	cachep->batchcount = 1;
	cachep->limit = BOOT_CPUCACHE_ENTRIES;
2200
	return 0;
2201 2202
}

/**
 * __kmem_cache_create - Create a cache.
 * @cachep: cache management descriptor
 * @flags: SLAB flags
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 */
int
__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
{
2227
	size_t left_over, freelist_size, ralign;
2228
	gfp_t gfp;
2229
	int err;
2230
	size_t size = cachep->size;
L
Linus Torvalds 已提交
2231 2232 2233 2234 2235 2236 2237 2238 2239

#if DEBUG
#if FORCED_DEBUG
	/*
	 * Enable redzoning and last user accounting, except for caches with
	 * large objects, if the increased size would increase the object size
	 * above the next power of two: caches with object sizes just above a
	 * power of two have a significant amount of internal fragmentation.
	 */
D
David Woodhouse 已提交
2240 2241
	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
						2 * sizeof(unsigned long long)))
P
Pekka Enberg 已提交
2242
		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
L
Linus Torvalds 已提交
2243 2244 2245 2246 2247 2248 2249
	if (!(flags & SLAB_DESTROY_BY_RCU))
		flags |= SLAB_POISON;
#endif
	if (flags & SLAB_DESTROY_BY_RCU)
		BUG_ON(flags & SLAB_POISON);
#endif

	/*
	 * Check that size is in terms of words.  This is needed to avoid
	 * unaligned accesses for some archs when redzoning is used, and makes
	 * sure any on-slab bufctl's are also correctly aligned.
	 */
P
Pekka Enberg 已提交
2255 2256 2257
	if (size & (BYTES_PER_WORD - 1)) {
		size += (BYTES_PER_WORD - 1);
		size &= ~(BYTES_PER_WORD - 1);
L
Linus Torvalds 已提交
2258 2259
	}

2260
	/*
	 * Redzoning and user store require word alignment or possibly larger.
	 * Note this will be overridden by architecture or caller mandated
	 * alignment if either is greater than BYTES_PER_WORD.
	 */
D
David Woodhouse 已提交
2265 2266 2267 2268 2269 2270 2271 2272 2273 2274
	if (flags & SLAB_STORE_USER)
		ralign = BYTES_PER_WORD;

	if (flags & SLAB_RED_ZONE) {
		ralign = REDZONE_ALIGN;
		/* If redzoning, ensure that the second redzone is suitably
		 * aligned, by adjusting the object size accordingly. */
		size += REDZONE_ALIGN - 1;
		size &= ~(REDZONE_ALIGN - 1);
	}
2275

2276
	/* 3) caller mandated alignment */
2277 2278
	if (ralign < cachep->align) {
		ralign = cachep->align;
L
Linus Torvalds 已提交
2279
	}
2280 2281
	/* disable debug if necessary */
	if (ralign > __alignof__(unsigned long long))
2282
		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
A
Andrew Morton 已提交
2283
	/*
	 * 4) Store it.
	 */
2286
	cachep->align = ralign;
L
Linus Torvalds 已提交
2287

2288 2289 2290 2291 2292
	if (slab_is_available())
		gfp = GFP_KERNEL;
	else
		gfp = GFP_NOWAIT;

2293
	setup_node_pointer(cachep);
L
Linus Torvalds 已提交
2294 2295
#if DEBUG

2296 2297 2298 2299
	/*
	 * Both debugging options require word-alignment which is calculated
	 * into align above.
	 */
L
Linus Torvalds 已提交
2300 2301
	if (flags & SLAB_RED_ZONE) {
		/* add space for red zone words */
2302 2303
		cachep->obj_offset += sizeof(unsigned long long);
		size += 2 * sizeof(unsigned long long);
L
Linus Torvalds 已提交
2304 2305
	}
	if (flags & SLAB_STORE_USER) {
2306
		/* user store requires one word storage behind the end of
		 * the real object. But if the second red zone needs to be
		 * aligned to 64 bits, we must allow that much space.
		 */
D
David Woodhouse 已提交
2310 2311 2312 2313
		if (flags & SLAB_RED_ZONE)
			size += REDZONE_ALIGN;
		else
			size += BYTES_PER_WORD;
L
Linus Torvalds 已提交
2314 2315
	}
#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
2316
	if (size >= kmalloc_size(INDEX_NODE + 1)
2317 2318 2319
	    && cachep->object_size > cache_line_size()
	    && ALIGN(size, cachep->align) < PAGE_SIZE) {
		cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
L
Linus Torvalds 已提交
2320 2321 2322 2323 2324
		size = PAGE_SIZE;
	}
#endif
#endif

2325 2326 2327
	/*
	 * Determine if the slab management is 'on' or 'off' slab.
	 * (bootstrapping cannot cope with offslab caches so don't do
	 * it too early on. Always use on-slab management when
	 * SLAB_NOLEAKTRACE is set, to avoid recursive calls into kmemleak.)
	 */
	if ((size >= (PAGE_SIZE >> 5)) && !slab_early_init &&
	    !(flags & SLAB_NOLEAKTRACE))
L
Linus Torvalds 已提交
2333 2334 2335 2336 2337 2338
		/*
		 * Size is large, assume best to place the slab management obj
		 * off-slab (should allow better packing of objs).
		 */
		flags |= CFLGS_OFF_SLAB;

2339
	size = ALIGN(size, cachep->align);
2340 2341 2342 2343 2344 2345
	/*
	 * We should restrict the number of objects in a slab to implement
	 * byte sized index. Refer comment on SLAB_OBJ_MIN_SIZE definition.
	 */
	if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
		size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
L
Linus Torvalds 已提交
2346

2347
	left_over = calculate_slab_order(cachep, size, cachep->align, flags);
L
Linus Torvalds 已提交
2348

2349
	if (!cachep->num)
2350
		return -E2BIG;
L
Linus Torvalds 已提交
2351

2352
	freelist_size = calculate_freelist_size(cachep->num, cachep->align);
L
Linus Torvalds 已提交
2353 2354 2355 2356 2357

	/*
	 * If the slab has been placed off-slab, and we have enough space then
	 * move it on-slab. This is at the expense of any extra colouring.
	 */
2358
	if (flags & CFLGS_OFF_SLAB && left_over >= freelist_size) {
L
Linus Torvalds 已提交
2359
		flags &= ~CFLGS_OFF_SLAB;
2360
		left_over -= freelist_size;
L
Linus Torvalds 已提交
2361 2362 2363 2364
	}

	if (flags & CFLGS_OFF_SLAB) {
		/* really off slab. No need for manual alignment */
2365
		freelist_size = calculate_freelist_size(cachep->num, 0);
2366 2367 2368 2369 2370 2371 2372 2373 2374

#ifdef CONFIG_PAGE_POISONING
		/* If we're going to use the generic kernel_map_pages()
		 * poisoning, then it's going to smash the contents of
		 * the redzone and userword anyhow, so switch them off.
		 */
		if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
			flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
#endif
L
Linus Torvalds 已提交
2375 2376 2377 2378
	}

	cachep->colour_off = cache_line_size();
	/* Offset must be a multiple of the alignment. */
2379 2380
	if (cachep->colour_off < cachep->align)
		cachep->colour_off = cachep->align;
P
Pekka Enberg 已提交
2381
	cachep->colour = left_over / cachep->colour_off;
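	/*
	 * Worked example (hypothetical numbers, added for clarity): with
	 * 512 bytes left over and a 64-byte colour_off, colour = 8, so
	 * cache_grow() starts successive slabs at offsets 0, 64, ...,
	 * 448 before wrapping, spreading objects of different slabs over
	 * different cache lines.
	 */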
2382
	cachep->freelist_size = freelist_size;
L
Linus Torvalds 已提交
2383
	cachep->flags = flags;
2384
	cachep->allocflags = __GFP_COMP;
2385
	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
2386
		cachep->allocflags |= GFP_DMA;
2387
	cachep->size = size;
2388
	cachep->reciprocal_buffer_size = reciprocal_value(size);
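	/*
	 * Note (added): obj_to_index() has to divide an object's offset
	 * within the slab by 'size' on every free; caching
	 * reciprocal_value(size) turns that division into a cheaper
	 * multiply-and-shift via reciprocal_divide().
	 */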
L
Linus Torvalds 已提交
2389

2390
	if (flags & CFLGS_OFF_SLAB) {
2391
		cachep->freelist_cache = kmalloc_slab(freelist_size, 0u);
2392
		/*
		 * This is a possibility for one of the kmalloc_{dma,}_caches.
		 * But since we go off slab only for object size greater than
		 * PAGE_SIZE/8, and kmalloc_{dma,}_caches get created
		 * in ascending order, this should not happen at all.
		 * But leave a BUG_ON for some lucky dude.
		 */
2399
		BUG_ON(ZERO_OR_NULL_PTR(cachep->freelist_cache));
2400
	}
L
Linus Torvalds 已提交
2401

2402 2403
	err = setup_cpu_cache(cachep, gfp);
	if (err) {
2404
		__kmem_cache_shutdown(cachep);
2405
		return err;
2406
	}
L
Linus Torvalds 已提交
2407

2408 2409 2410 2411 2412 2413 2414 2415
	if (flags & SLAB_DEBUG_OBJECTS) {
		/*
		 * Would deadlock through slab_destroy()->call_rcu()->
		 * debug_object_activate()->kmem_cache_alloc().
		 */
		WARN_ON_ONCE(flags & SLAB_DESTROY_BY_RCU);

		slab_set_debugobj_lock_classes(cachep);
2416 2417
	} else if (!OFF_SLAB(cachep) && !(flags & SLAB_DESTROY_BY_RCU))
		on_slab_lock_classes(cachep);
2418

2419
	return 0;
L
Linus Torvalds 已提交
2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432
}

#if DEBUG
static void check_irq_off(void)
{
	BUG_ON(!irqs_disabled());
}

static void check_irq_on(void)
{
	BUG_ON(irqs_disabled());
}

2433
static void check_spinlock_acquired(struct kmem_cache *cachep)
L
Linus Torvalds 已提交
2434 2435 2436
{
#ifdef CONFIG_SMP
	check_irq_off();
2437
	assert_spin_locked(&cachep->node[numa_mem_id()]->list_lock);
L
Linus Torvalds 已提交
2438 2439
#endif
}
2440

2441
static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2442 2443 2444
{
#ifdef CONFIG_SMP
	check_irq_off();
2445
	assert_spin_locked(&cachep->node[node]->list_lock);
2446 2447 2448
#endif
}

L
Linus Torvalds 已提交
2449 2450 2451 2452
#else
#define check_irq_off()	do { } while(0)
#define check_irq_on()	do { } while(0)
#define check_spinlock_acquired(x) do { } while(0)
2453
#define check_spinlock_acquired_node(x, y) do { } while(0)
L
Linus Torvalds 已提交
2454 2455
#endif

2456
static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
2457 2458 2459
			struct array_cache *ac,
			int force, int node);

L
Linus Torvalds 已提交
2460 2461
static void do_drain(void *arg)
{
A
Andrew Morton 已提交
2462
	struct kmem_cache *cachep = arg;
L
Linus Torvalds 已提交
2463
	struct array_cache *ac;
2464
	int node = numa_mem_id();
L
Linus Torvalds 已提交
2465 2466

	check_irq_off();
2467
	ac = cpu_cache_get(cachep);
2468
	spin_lock(&cachep->node[node]->list_lock);
2469
	free_block(cachep, ac->entry, ac->avail, node);
2470
	spin_unlock(&cachep->node[node]->list_lock);
L
Linus Torvalds 已提交
2471 2472 2473
	ac->avail = 0;
}

2474
static void drain_cpu_caches(struct kmem_cache *cachep)
L
Linus Torvalds 已提交
2475
{
2476
	struct kmem_cache_node *n;
2477 2478
	int node;

2479
	on_each_cpu(do_drain, cachep, 1);
L
Linus Torvalds 已提交
2480
	check_irq_on();
P
Pekka Enberg 已提交
2481
	for_each_online_node(node) {
2482 2483 2484
		n = cachep->node[node];
		if (n && n->alien)
			drain_alien_cache(cachep, n->alien);
2485 2486 2487
	}

	for_each_online_node(node) {
2488 2489 2490
		n = cachep->node[node];
		if (n)
			drain_array(cachep, n, n->shared, 1, node);
2491
	}
L
Linus Torvalds 已提交
2492 2493
}

2494 2495 2496 2497 2498 2499 2500
/*
 * Remove slabs from the list of free slabs.
 * Specify the number of slabs to drain in tofree.
 *
 * Returns the actual number of slabs released.
 */
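/*
 * Note (added): only completely free slabs are released here; partial
 * and full slabs are left untouched, which is why __kmem_cache_shrink()
 * can still find a non-empty cache after draining.
 */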
static int drain_freelist(struct kmem_cache *cache,
2501
			struct kmem_cache_node *n, int tofree)
L
Linus Torvalds 已提交
2502
{
2503 2504
	struct list_head *p;
	int nr_freed;
2505
	struct page *page;
L
Linus Torvalds 已提交
2506

2507
	nr_freed = 0;
2508
	while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
L
Linus Torvalds 已提交
2509

2510 2511 2512 2513
		spin_lock_irq(&n->list_lock);
		p = n->slabs_free.prev;
		if (p == &n->slabs_free) {
			spin_unlock_irq(&n->list_lock);
2514 2515
			goto out;
		}
L
Linus Torvalds 已提交
2516

2517
		page = list_entry(p, struct page, lru);
L
Linus Torvalds 已提交
2518
#if DEBUG
2519
		BUG_ON(page->active);
L
Linus Torvalds 已提交
2520
#endif
2521
		list_del(&page->lru);
2522 2523 2524 2525
		/*
		 * Safe to drop the lock. The slab is no longer linked
		 * to the cache.
		 */
2526 2527
		n->free_objects -= cache->num;
		spin_unlock_irq(&n->list_lock);
2528
		slab_destroy(cache, page);
2529
		nr_freed++;
L
Linus Torvalds 已提交
2530
	}
2531 2532
out:
	return nr_freed;
L
Linus Torvalds 已提交
2533 2534
}

2535
int __kmem_cache_shrink(struct kmem_cache *cachep)
2536 2537
{
	int ret = 0, i = 0;
2538
	struct kmem_cache_node *n;
2539 2540 2541 2542 2543

	drain_cpu_caches(cachep);

	check_irq_on();
	for_each_online_node(i) {
2544 2545
		n = cachep->node[i];
		if (!n)
2546 2547
			continue;

2548
		drain_freelist(cachep, n, slabs_tofree(cachep, n));
2549

2550 2551
		ret += !list_empty(&n->slabs_full) ||
			!list_empty(&n->slabs_partial);
2552 2553 2554 2555
	}
	return (ret ? 1 : 0);
}

2556
int __kmem_cache_shutdown(struct kmem_cache *cachep)
L
Linus Torvalds 已提交
2557
{
2558
	int i;
2559
	struct kmem_cache_node *n;
2560
	int rc = __kmem_cache_shrink(cachep);
L
Linus Torvalds 已提交
2561

2562 2563
	if (rc)
		return rc;
L
Linus Torvalds 已提交
2564

2565 2566
	for_each_online_cpu(i)
	    kfree(cachep->array[i]);
L
Linus Torvalds 已提交
2567

2568
	/* NUMA: free the node structures */
2569
	for_each_online_node(i) {
2570 2571 2572 2573 2574
		n = cachep->node[i];
		if (n) {
			kfree(n->shared);
			free_alien_cache(n->alien);
			kfree(n);
2575 2576 2577
		}
	}
	return 0;
L
Linus Torvalds 已提交
2578 2579
}

2580 2581
/*
 * Get the memory for a slab management obj.
 *
 * For a slab cache when the slab descriptor is off-slab, the
 * slab descriptor can't come from the same cache which is being created,
 * because if that were the case, we would defer the creation of
 * the kmalloc_{dma,}_cache of size sizeof(slab descriptor) to this point.
 * And we eventually call down to __kmem_cache_create(), which
 * in turn looks up in the kmalloc_{dma,}_caches for the desired-size one.
 * This is a "chicken-and-egg" problem.
 *
 * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
 * which are all initialized during kmem_cache_init().
 */
2594
static void *alloc_slabmgmt(struct kmem_cache *cachep,
2595 2596
				   struct page *page, int colour_off,
				   gfp_t local_flags, int nodeid)
L
Linus Torvalds 已提交
2597
{
2598
	void *freelist;
2599
	void *addr = page_address(page);
P
Pekka Enberg 已提交
2600

L
Linus Torvalds 已提交
2601 2602
	if (OFF_SLAB(cachep)) {
		/* Slab management obj is off-slab. */
2603
		freelist = kmem_cache_alloc_node(cachep->freelist_cache,
2604
					      local_flags, nodeid);
2605
		if (!freelist)
L
Linus Torvalds 已提交
2606 2607
			return NULL;
	} else {
2608 2609
		freelist = addr + colour_off;
		colour_off += cachep->freelist_size;
L
Linus Torvalds 已提交
2610
	}
2611 2612 2613
	page->active = 0;
	page->s_mem = addr + colour_off;
	return freelist;
L
Linus Torvalds 已提交
2614 2615
}

2616
static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
L
Linus Torvalds 已提交
2617
{
2618
	return ((freelist_idx_t *)page->freelist)[idx];
2619 2620 2621
}

static inline void set_free_obj(struct page *page,
2622
					unsigned int idx, freelist_idx_t val)
2623
{
2624
	((freelist_idx_t *)(page->freelist))[idx] = val;
L
Linus Torvalds 已提交
2625 2626
}
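/*
 * Clarifying note (added): page->freelist points at an array of
 * freelist_idx_t with one entry per object.  Entries at positions
 * page->active .. num-1 hold the indices of the objects that are still
 * free, so together with page->active the array behaves like a stack:
 * allocation pops an index from it, freeing pushes the released index back.
 */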

2627
static void cache_init_objs(struct kmem_cache *cachep,
2628
			    struct page *page)
L
Linus Torvalds 已提交
2629 2630 2631 2632
{
	int i;

	for (i = 0; i < cachep->num; i++) {
2633
		void *objp = index_to_obj(cachep, page, i);
L
Linus Torvalds 已提交
2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645
#if DEBUG
		/* need to poison the objs? */
		if (cachep->flags & SLAB_POISON)
			poison_obj(cachep, objp, POISON_FREE);
		if (cachep->flags & SLAB_STORE_USER)
			*dbg_userword(cachep, objp) = NULL;

		if (cachep->flags & SLAB_RED_ZONE) {
			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
		}
		/*
A
Andrew Morton 已提交
2646 2647 2648
		 * Constructors are not allowed to allocate memory from the same
		 * cache which they are a constructor for.  Otherwise, deadlock.
		 * They must also be threaded.
L
Linus Torvalds 已提交
2649 2650
		 */
		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
2651
			cachep->ctor(objp + obj_offset(cachep));
L
Linus Torvalds 已提交
2652 2653 2654 2655

		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "constructor overwrote the"
P
Pekka Enberg 已提交
2656
					   " end of an object");
L
Linus Torvalds 已提交
2657 2658
			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "constructor overwrote the"
P
Pekka Enberg 已提交
2659
					   " start of an object");
L
Linus Torvalds 已提交
2660
		}
2661
		if ((cachep->size % PAGE_SIZE) == 0 &&
A
Andrew Morton 已提交
2662
			    OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
P
Pekka Enberg 已提交
2663
			kernel_map_pages(virt_to_page(objp),
2664
					 cachep->size / PAGE_SIZE, 0);
L
Linus Torvalds 已提交
2665 2666
#else
		if (cachep->ctor)
2667
			cachep->ctor(objp);
L
Linus Torvalds 已提交
2668
#endif
2669
		set_obj_status(page, i, OBJECT_FREE);
2670
		set_free_obj(page, i, i);
L
Linus Torvalds 已提交
2671 2672 2673
	}
}
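/*
 * Note (added): cache_init_objs() leaves the freelist as the identity
 * permutation 0, 1, ..., num-1, so a freshly grown slab hands out its
 * objects in address order until frees start reshuffling the indices.
 */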

2674
static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
L
Linus Torvalds 已提交
2675
{
2676 2677
	if (CONFIG_ZONE_DMA_FLAG) {
		if (flags & GFP_DMA)
2678
			BUG_ON(!(cachep->allocflags & GFP_DMA));
2679
		else
2680
			BUG_ON(cachep->allocflags & GFP_DMA);
2681
	}
L
Linus Torvalds 已提交
2682 2683
}

2684
static void *slab_get_obj(struct kmem_cache *cachep, struct page *page,
A
Andrew Morton 已提交
2685
				int nodeid)
2686
{
2687
	void *objp;
2688

2689
	objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
2690
	page->active++;
2691
#if DEBUG
J
Joonsoo Kim 已提交
2692
	WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
2693 2694 2695 2696 2697
#endif

	return objp;
}

2698
static void slab_put_obj(struct kmem_cache *cachep, struct page *page,
A
Andrew Morton 已提交
2699
				void *objp, int nodeid)
2700
{
2701
	unsigned int objnr = obj_to_index(cachep, page, objp);
2702
#if DEBUG
J
Joonsoo Kim 已提交
2703
	unsigned int i;
2704

2705
	/* Verify that the slab belongs to the intended node */
J
Joonsoo Kim 已提交
2706
	WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
2707

2708
	/* Verify double free bug */
2709
	for (i = page->active; i < cachep->num; i++) {
2710
		if (get_free_obj(page, i) == objnr) {
2711 2712 2713 2714
			printk(KERN_ERR "slab: double free detected in cache "
					"'%s', objp %p\n", cachep->name, objp);
			BUG();
		}
2715 2716
	}
#endif
2717
	page->active--;
2718
	set_free_obj(page, page->active, objnr);
2719 2720
}
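/*
 * Note (added): slab_get_obj()/slab_put_obj() use page->active as the
 * stack pointer into the freelist index array; the DEBUG loop in
 * slab_put_obj() walks the currently free entries so a double free is
 * caught before it can corrupt the slab.
 */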

/*
 * Map pages beginning at addr to the given cache and slab. This is required
 * for the slab allocator to be able to lookup the cache and slab of a
 * virtual address for kfree, ksize, and slab debugging.
 */
static void slab_map_pages(struct kmem_cache *cache, struct page *page,
			   void *freelist)
L
Linus Torvalds 已提交
2728
{
2729
	page->slab_cache = cache;
2730
	page->freelist = freelist;
L
Linus Torvalds 已提交
2731 2732 2733 2734 2735 2736
}

/*
 * Grow (by 1) the number of slabs within a cache.  This is called by
 * kmem_cache_alloc() when there are no active objs left in a cache.
 */
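/*
 * Rough sequence (summary added for clarity): pick the next colour
 * offset under the node's list_lock, drop the lock, allocate the
 * backing page(s) and the slab management data, initialise every
 * object, then retake the lock and add the new, fully free slab to
 * n->slabs_free while raising n->free_objects by cachep->num.
 */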
2737
static int cache_grow(struct kmem_cache *cachep,
2738
		gfp_t flags, int nodeid, struct page *page)
L
Linus Torvalds 已提交
2739
{
2740
	void *freelist;
P
Pekka Enberg 已提交
2741 2742
	size_t offset;
	gfp_t local_flags;
2743
	struct kmem_cache_node *n;
L
Linus Torvalds 已提交
2744

A
Andrew Morton 已提交
2745 2746 2747
	/*
	 * Be lazy and only check for valid flags here, keeping it out of the
	 * critical path in kmem_cache_alloc().
	 */
C
Christoph Lameter 已提交
2749 2750
	BUG_ON(flags & GFP_SLAB_BUG_MASK);
	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
L
Linus Torvalds 已提交
2751

2752
	/* Take the node list lock to change the colour_next on this node */
L
Linus Torvalds 已提交
2753
	check_irq_off();
2754 2755
	n = cachep->node[nodeid];
	spin_lock(&n->list_lock);
L
Linus Torvalds 已提交
2756 2757

	/* Get colour for the slab, and calculate the next value. */
2758 2759 2760 2761 2762
	offset = n->colour_next;
	n->colour_next++;
	if (n->colour_next >= cachep->colour)
		n->colour_next = 0;
	spin_unlock(&n->list_lock);
L
Linus Torvalds 已提交
2763

2764
	offset *= cachep->colour_off;
L
Linus Torvalds 已提交
2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776

	if (local_flags & __GFP_WAIT)
		local_irq_enable();

	/*
	 * The test for missing atomic flag is performed here, rather than
	 * the more obvious place, simply to reduce the critical path length
	 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
	 * will eventually be caught here (where it matters).
	 */
	kmem_flagcheck(cachep, flags);

A
Andrew Morton 已提交
2777 2778 2779
	/*
	 * Get mem for the objs.  Attempt to allocate a physical page from
	 * 'nodeid'.
2780
	 */
2781 2782 2783
	if (!page)
		page = kmem_getpages(cachep, local_flags, nodeid);
	if (!page)
L
Linus Torvalds 已提交
2784 2785 2786
		goto failed;

	/* Get slab management. */
2787
	freelist = alloc_slabmgmt(cachep, page, offset,
C
Christoph Lameter 已提交
2788
			local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
2789
	if (!freelist)
L
Linus Torvalds 已提交
2790 2791
		goto opps1;

2792
	slab_map_pages(cachep, page, freelist);
L
Linus Torvalds 已提交
2793

2794
	cache_init_objs(cachep, page);
L
Linus Torvalds 已提交
2795 2796 2797 2798

	if (local_flags & __GFP_WAIT)
		local_irq_disable();
	check_irq_off();
2799
	spin_lock(&n->list_lock);
L
Linus Torvalds 已提交
2800 2801

	/* Make slab active. */
2802
	list_add_tail(&page->lru, &(n->slabs_free));
L
Linus Torvalds 已提交
2803
	STATS_INC_GROWN(cachep);
2804 2805
	n->free_objects += cachep->num;
	spin_unlock(&n->list_lock);
L
Linus Torvalds 已提交
2806
	return 1;
A
Andrew Morton 已提交
2807
opps1:
2808
	kmem_freepages(cachep, page);
A
Andrew Morton 已提交
2809
failed:
L
Linus Torvalds 已提交
2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825
	if (local_flags & __GFP_WAIT)
		local_irq_disable();
	return 0;
}

#if DEBUG

/*
 * Perform extra freeing checks:
 * - detect bad pointers.
 * - POISON/RED_ZONE checking
 */
static void kfree_debugcheck(const void *objp)
{
	if (!virt_addr_valid(objp)) {
		printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
P
Pekka Enberg 已提交
2826 2827
		       (unsigned long)objp);
		BUG();
L
Linus Torvalds 已提交
2828 2829 2830
	}
}

2831 2832
static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
{
2833
	unsigned long long redzone1, redzone2;
2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848

	redzone1 = *dbg_redzone1(cache, obj);
	redzone2 = *dbg_redzone2(cache, obj);

	/*
	 * Redzone is ok.
	 */
	if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
		return;

	if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
		slab_error(cache, "double free detected");
	else
		slab_error(cache, "memory outside object was overwritten");

2849
	printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
2850 2851 2852
			obj, redzone1, redzone2);
}

2853
static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2854
				   unsigned long caller)
L
Linus Torvalds 已提交
2855 2856
{
	unsigned int objnr;
2857
	struct page *page;
L
Linus Torvalds 已提交
2858

2859 2860
	BUG_ON(virt_to_cache(objp) != cachep);

2861
	objp -= obj_offset(cachep);
L
Linus Torvalds 已提交
2862
	kfree_debugcheck(objp);
2863
	page = virt_to_head_page(objp);
L
Linus Torvalds 已提交
2864 2865

	if (cachep->flags & SLAB_RED_ZONE) {
2866
		verify_redzone_free(cachep, objp);
L
Linus Torvalds 已提交
2867 2868 2869 2870
		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
	}
	if (cachep->flags & SLAB_STORE_USER)
2871
		*dbg_userword(cachep, objp) = (void *)caller;
L
Linus Torvalds 已提交
2872

2873
	objnr = obj_to_index(cachep, page, objp);
L
Linus Torvalds 已提交
2874 2875

	BUG_ON(objnr >= cachep->num);
2876
	BUG_ON(objp != index_to_obj(cachep, page, objnr));
L
Linus Torvalds 已提交
2877

2878
	set_obj_status(page, objnr, OBJECT_FREE);
L
Linus Torvalds 已提交
2879 2880
	if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
2881
		if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
2882
			store_stackinfo(cachep, objp, caller);
P
Pekka Enberg 已提交
2883
			kernel_map_pages(virt_to_page(objp),
2884
					 cachep->size / PAGE_SIZE, 0);
L
Linus Torvalds 已提交
2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899
		} else {
			poison_obj(cachep, objp, POISON_FREE);
		}
#else
		poison_obj(cachep, objp, POISON_FREE);
#endif
	}
	return objp;
}

#else
#define kfree_debugcheck(x) do { } while(0)
#define cache_free_debugcheck(x,objp,z) (objp)
#endif
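/*
 * Summary (added for clarity): cache_alloc_refill() runs when the
 * per-cpu array is empty.  It tries, in order, to pull up to
 * 'batchcount' objects from the per-node shared array, then from
 * partial slabs, then from free slabs, and only as a last resort grows
 * the cache with a brand new slab via cache_grow().
 */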

2900 2901
static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
							bool force_refill)
L
Linus Torvalds 已提交
2902 2903
{
	int batchcount;
2904
	struct kmem_cache_node *n;
L
Linus Torvalds 已提交
2905
	struct array_cache *ac;
P
Pekka Enberg 已提交
2906 2907
	int node;

L
Linus Torvalds 已提交
2908
	check_irq_off();
2909
	node = numa_mem_id();
2910 2911 2912
	if (unlikely(force_refill))
		goto force_grow;
retry:
2913
	ac = cpu_cache_get(cachep);
L
Linus Torvalds 已提交
2914 2915
	batchcount = ac->batchcount;
	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
A
Andrew Morton 已提交
2916 2917 2918 2919
		/*
		 * If there was little recent activity on this cache, then
		 * perform only a partial refill.  Otherwise we could generate
		 * refill bouncing.
L
Linus Torvalds 已提交
2920 2921 2922
		 */
		batchcount = BATCHREFILL_LIMIT;
	}
2923
	n = cachep->node[node];
2924

2925 2926
	BUG_ON(ac->avail > 0 || !n);
	spin_lock(&n->list_lock);
L
Linus Torvalds 已提交
2927

2928
	/* See if we can refill from the shared array */
2929 2930
	if (n->shared && transfer_objects(ac, n->shared, batchcount)) {
		n->shared->touched = 1;
2931
		goto alloc_done;
2932
	}
2933

L
Linus Torvalds 已提交
2934 2935
	while (batchcount > 0) {
		struct list_head *entry;
2936
		struct page *page;
L
Linus Torvalds 已提交
2937
		/* Get the slab the allocation is to come from. */
2938 2939 2940 2941 2942
		entry = n->slabs_partial.next;
		if (entry == &n->slabs_partial) {
			n->free_touched = 1;
			entry = n->slabs_free.next;
			if (entry == &n->slabs_free)
L
Linus Torvalds 已提交
2943 2944 2945
				goto must_grow;
		}

2946
		page = list_entry(entry, struct page, lru);
L
Linus Torvalds 已提交
2947
		check_spinlock_acquired(cachep);
2948 2949 2950 2951 2952 2953

		/*
		 * The slab was either on partial or free list so
		 * there must be at least one object available for
		 * allocation.
		 */
2954
		BUG_ON(page->active >= cachep->num);
2955

2956
		while (page->active < cachep->num && batchcount--) {
L
Linus Torvalds 已提交
2957 2958 2959 2960
			STATS_INC_ALLOCED(cachep);
			STATS_INC_ACTIVE(cachep);
			STATS_SET_HIGH(cachep);

2961
			ac_put_obj(cachep, ac, slab_get_obj(cachep, page,
2962
									node));
L
Linus Torvalds 已提交
2963 2964 2965
		}

		/* move slabp to correct slabp list: */
2966 2967
		list_del(&page->lru);
		if (page->active == cachep->num)
2968
			list_add(&page->lru, &n->slabs_full);
L
Linus Torvalds 已提交
2969
		else
2970
			list_add(&page->lru, &n->slabs_partial);
L
Linus Torvalds 已提交
2971 2972
	}

A
Andrew Morton 已提交
2973
must_grow:
2974
	n->free_objects -= ac->avail;
A
Andrew Morton 已提交
2975
alloc_done:
2976
	spin_unlock(&n->list_lock);
L
Linus Torvalds 已提交
2977 2978 2979

	if (unlikely(!ac->avail)) {
		int x;
2980
force_grow:
2981
		x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
2982

A
Andrew Morton 已提交
2983
		/* cache_grow can reenable interrupts, then ac could change. */
2984
		ac = cpu_cache_get(cachep);
2985
		node = numa_mem_id();
2986 2987 2988

		/* no objects in sight? abort */
		if (!x && (ac->avail == 0 || force_refill))
L
Linus Torvalds 已提交
2989 2990
			return NULL;

A
Andrew Morton 已提交
2991
		if (!ac->avail)		/* objects refilled by interrupt? */
L
Linus Torvalds 已提交
2992 2993 2994
			goto retry;
	}
	ac->touched = 1;
2995 2996

	return ac_get_obj(cachep, ac, flags, force_refill);
L
Linus Torvalds 已提交
2997 2998
}

A
Andrew Morton 已提交
2999 3000
static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
						gfp_t flags)
L
Linus Torvalds 已提交
3001 3002 3003 3004 3005 3006 3007 3008
{
	might_sleep_if(flags & __GFP_WAIT);
#if DEBUG
	kmem_flagcheck(cachep, flags);
#endif
}

#if DEBUG
A
Andrew Morton 已提交
3009
static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
3010
				gfp_t flags, void *objp, unsigned long caller)
L
Linus Torvalds 已提交
3011
{
3012 3013
	struct page *page;

P
Pekka Enberg 已提交
3014
	if (!objp)
L
Linus Torvalds 已提交
3015
		return objp;
P
Pekka Enberg 已提交
3016
	if (cachep->flags & SLAB_POISON) {
L
Linus Torvalds 已提交
3017
#ifdef CONFIG_DEBUG_PAGEALLOC
3018
		if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
P
Pekka Enberg 已提交
3019
			kernel_map_pages(virt_to_page(objp),
3020
					 cachep->size / PAGE_SIZE, 1);
L
Linus Torvalds 已提交
3021 3022 3023 3024 3025 3026 3027 3028
		else
			check_poison_obj(cachep, objp);
#else
		check_poison_obj(cachep, objp);
#endif
		poison_obj(cachep, objp, POISON_INUSE);
	}
	if (cachep->flags & SLAB_STORE_USER)
3029
		*dbg_userword(cachep, objp) = (void *)caller;
L
Linus Torvalds 已提交
3030 3031

	if (cachep->flags & SLAB_RED_ZONE) {
A
Andrew Morton 已提交
3032 3033 3034 3035
		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
			slab_error(cachep, "double free, or memory outside"
						" object was overwritten");
P
Pekka Enberg 已提交
3036
			printk(KERN_ERR
3037
				"%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
A
Andrew Morton 已提交
3038 3039
				objp, *dbg_redzone1(cachep, objp),
				*dbg_redzone2(cachep, objp));
L
Linus Torvalds 已提交
3040 3041 3042 3043
		}
		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
	}
3044 3045 3046

	page = virt_to_head_page(objp);
	set_obj_status(page, obj_to_index(cachep, page, objp), OBJECT_ACTIVE);
3047
	objp += obj_offset(cachep);
3048
	if (cachep->ctor && cachep->flags & SLAB_POISON)
3049
		cachep->ctor(objp);
T
Tetsuo Handa 已提交
3050 3051
	if (ARCH_SLAB_MINALIGN &&
	    ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
3052
		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
H
Hugh Dickins 已提交
3053
		       objp, (int)ARCH_SLAB_MINALIGN);
3054
	}
L
Linus Torvalds 已提交
3055 3056 3057 3058 3059 3060
	return objp;
}
#else
#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
#endif

A
Akinobu Mita 已提交
3061
static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
3062
{
3063
	if (cachep == kmem_cache)
A
Akinobu Mita 已提交
3064
		return false;
3065

3066
	return should_failslab(cachep->object_size, flags, cachep->flags);
3067 3068
}

3069
static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
L
Linus Torvalds 已提交
3070
{
P
Pekka Enberg 已提交
3071
	void *objp;
L
Linus Torvalds 已提交
3072
	struct array_cache *ac;
3073
	bool force_refill = false;
L
Linus Torvalds 已提交
3074

3075
	check_irq_off();
3076

3077
	ac = cpu_cache_get(cachep);
L
Linus Torvalds 已提交
3078 3079
	if (likely(ac->avail)) {
		ac->touched = 1;
3080 3081
		objp = ac_get_obj(cachep, ac, flags, false);

3082
		/*
3083 3084
		 * Allow for the possibility all avail objects are not allowed
		 * by the current flags
3085
		 */
3086 3087 3088 3089 3090
		if (objp) {
			STATS_INC_ALLOCHIT(cachep);
			goto out;
		}
		force_refill = true;
L
Linus Torvalds 已提交
3091
	}
3092 3093 3094 3095 3096 3097 3098 3099 3100 3101

	STATS_INC_ALLOCMISS(cachep);
	objp = cache_alloc_refill(cachep, flags, force_refill);
	/*
	 * the 'ac' may be updated by cache_alloc_refill(),
	 * and kmemleak_erase() requires its correct value.
	 */
	ac = cpu_cache_get(cachep);

out:
3102 3103 3104 3105 3106
	/*
	 * To avoid a false negative, if an object that is in one of the
	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
	 * treat the array pointers as a reference to the object.
	 */
3107 3108
	if (objp)
		kmemleak_erase(&ac->entry[ac->avail]);
3109 3110 3111
	return objp;
}
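/*
 * Note (added): ____cache_alloc() above is the fast path - it pops the
 * most recently freed object from the per-cpu array with interrupts
 * disabled and no locks held; cache_alloc_refill() is only entered when
 * that array is empty or none of its cached objects may be used (e.g.
 * pfmemalloc-reserved objects for a normal allocation).
 */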

3112
#ifdef CONFIG_NUMA
3113
/*
 * Try allocating on another node if PF_SPREAD_SLAB or a mempolicy is set.
 *
 * If we are in_interrupt, then process context, including cpusets and
 * mempolicy, may not apply and should not be used for allocation policy.
 */
static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	int nid_alloc, nid_here;

3123
	if (in_interrupt() || (flags & __GFP_THISNODE))
3124
		return NULL;
3125
	nid_alloc = nid_here = numa_mem_id();
3126
	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3127
		nid_alloc = cpuset_slab_spread_node();
3128
	else if (current->mempolicy)
3129
		nid_alloc = mempolicy_slab_node();
3130
	if (nid_alloc != nid_here)
3131
		return ____cache_alloc_node(cachep, flags, nid_alloc);
3132 3133 3134
	return NULL;
}

3135 3136
/*
 * Fallback function if there was no memory available and no objects on a
 * certain node and fall back is permitted. First we scan all the
 * available nodes for available objects. If that fails then we
 * perform an allocation without specifying a node. This allows the page
 * allocator to do its reclaim / fallback magic. We then insert the
 * slab into the proper nodelist and then allocate from it.
 */
3143
static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3144
{
3145 3146
	struct zonelist *zonelist;
	gfp_t local_flags;
3147
	struct zoneref *z;
3148 3149
	struct zone *zone;
	enum zone_type high_zoneidx = gfp_zone(flags);
3150
	void *obj = NULL;
3151
	int nid;
3152
	unsigned int cpuset_mems_cookie;
3153 3154 3155 3156

	if (flags & __GFP_THISNODE)
		return NULL;

C
Christoph Lameter 已提交
3157
	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
3158

3159
retry_cpuset:
3160
	cpuset_mems_cookie = read_mems_allowed_begin();
3161
	zonelist = node_zonelist(mempolicy_slab_node(), flags);
3162

3163 3164 3165 3166 3167
retry:
	/*
	 * Look through allowed nodes for objects available
	 * from existing per node queues.
	 */
3168 3169
	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
		nid = zone_to_nid(zone);
3170

3171
		if (cpuset_zone_allowed_hardwall(zone, flags) &&
3172 3173
			cache->node[nid] &&
			cache->node[nid]->free_objects) {
3174 3175
				obj = ____cache_alloc_node(cache,
					flags | GFP_THISNODE, nid);
3176 3177 3178
				if (obj)
					break;
		}
3179 3180
	}

3181
	if (!obj) {
3182 3183 3184 3185 3186 3187
		/*
		 * This allocation will be performed within the constraints
		 * of the current cpuset / memory policy requirements.
		 * We may trigger various forms of reclaim on the allowed
		 * set and go into memory reserves if necessary.
		 */
3188 3189
		struct page *page;

3190 3191 3192
		if (local_flags & __GFP_WAIT)
			local_irq_enable();
		kmem_flagcheck(cache, flags);
3193
		page = kmem_getpages(cache, local_flags, numa_mem_id());
3194 3195
		if (local_flags & __GFP_WAIT)
			local_irq_disable();
3196
		if (page) {
3197 3198 3199
			/*
			 * Insert into the appropriate per node queues
			 */
3200 3201
			nid = page_to_nid(page);
			if (cache_grow(cache, flags, nid, page)) {
3202 3203 3204 3205 3206 3207 3208 3209 3210 3211
				obj = ____cache_alloc_node(cache,
					flags | GFP_THISNODE, nid);
				if (!obj)
					/*
					 * Another processor may allocate the
					 * objects in the slab since we are
					 * not holding any locks.
					 */
					goto retry;
			} else {
3212
				/* cache_grow already freed obj */
3213 3214 3215
				obj = NULL;
			}
		}
3216
	}
3217

3218
	if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
3219
		goto retry_cpuset;
3220 3221 3222
	return obj;
}

3223 3224
/*
 * An interface to enable slab creation on nodeid
 */
3226
static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
A
Andrew Morton 已提交
3227
				int nodeid)
3228 3229
{
	struct list_head *entry;
3230
	struct page *page;
3231
	struct kmem_cache_node *n;
P
Pekka Enberg 已提交
3232 3233 3234
	void *obj;
	int x;

3235
	VM_BUG_ON(nodeid > num_online_nodes());
3236 3237
	n = cachep->node[nodeid];
	BUG_ON(!n);
P
Pekka Enberg 已提交
3238

A
Andrew Morton 已提交
3239
retry:
3240
	check_irq_off();
3241 3242 3243 3244 3245 3246
	spin_lock(&n->list_lock);
	entry = n->slabs_partial.next;
	if (entry == &n->slabs_partial) {
		n->free_touched = 1;
		entry = n->slabs_free.next;
		if (entry == &n->slabs_free)
P
Pekka Enberg 已提交
3247 3248 3249
			goto must_grow;
	}

3250
	page = list_entry(entry, struct page, lru);
P
Pekka Enberg 已提交
3251 3252 3253 3254 3255 3256
	check_spinlock_acquired_node(cachep, nodeid);

	STATS_INC_NODEALLOCS(cachep);
	STATS_INC_ACTIVE(cachep);
	STATS_SET_HIGH(cachep);

3257
	BUG_ON(page->active == cachep->num);
P
Pekka Enberg 已提交
3258

3259
	obj = slab_get_obj(cachep, page, nodeid);
3260
	n->free_objects--;
P
Pekka Enberg 已提交
3261
	/* move slabp to correct slabp list: */
3262
	list_del(&page->lru);
P
Pekka Enberg 已提交
3263

3264 3265
	if (page->active == cachep->num)
		list_add(&page->lru, &n->slabs_full);
A
Andrew Morton 已提交
3266
	else
3267
		list_add(&page->lru, &n->slabs_partial);
3268

3269
	spin_unlock(&n->list_lock);
P
Pekka Enberg 已提交
3270
	goto done;
3271

A
Andrew Morton 已提交
3272
must_grow:
3273
	spin_unlock(&n->list_lock);
3274
	x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
3275 3276
	if (x)
		goto retry;
L
Linus Torvalds 已提交
3277

3278
	return fallback_alloc(cachep, flags);
3279

A
Andrew Morton 已提交
3280
done:
P
Pekka Enberg 已提交
3281
	return obj;
3282
}
3283 3284

static __always_inline void *
3285
slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3286
		   unsigned long caller)
3287 3288 3289
{
	unsigned long save_flags;
	void *ptr;
3290
	int slab_node = numa_mem_id();
3291

3292
	flags &= gfp_allowed_mask;
3293

3294 3295
	lockdep_trace_alloc(flags);

A
Akinobu Mita 已提交
3296
	if (slab_should_failslab(cachep, flags))
3297 3298
		return NULL;

3299 3300
	cachep = memcg_kmem_get_cache(cachep, flags);

3301 3302 3303
	cache_alloc_debugcheck_before(cachep, flags);
	local_irq_save(save_flags);

A
Andrew Morton 已提交
3304
	if (nodeid == NUMA_NO_NODE)
3305
		nodeid = slab_node;
3306

3307
	if (unlikely(!cachep->node[nodeid])) {
3308 3309 3310 3311 3312
		/* Node not bootstrapped yet */
		ptr = fallback_alloc(cachep, flags);
		goto out;
	}

3313
	if (nodeid == slab_node) {
3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328
		/*
		 * Use the locally cached objects if possible.
		 * However ____cache_alloc does not allow fallback
		 * to other nodes. It may fail while we still have
		 * objects on other nodes available.
		 */
		ptr = ____cache_alloc(cachep, flags);
		if (ptr)
			goto out;
	}
	/* ___cache_alloc_node can fall back to other nodes */
	ptr = ____cache_alloc_node(cachep, flags, nodeid);
  out:
	local_irq_restore(save_flags);
	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3329
	kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags,
3330
				 flags);
3331

3332
	if (likely(ptr)) {
3333
		kmemcheck_slab_alloc(cachep, flags, ptr, cachep->object_size);
3334 3335 3336
		if (unlikely(flags & __GFP_ZERO))
			memset(ptr, 0, cachep->object_size);
	}
3337

3338 3339 3340 3341 3342 3343 3344 3345
	return ptr;
}

static __always_inline void *
__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
{
	void *objp;

3346
	if (current->mempolicy || unlikely(current->flags & PF_SPREAD_SLAB)) {
3347 3348 3349 3350 3351 3352 3353 3354 3355 3356
		objp = alternate_node_alloc(cache, flags);
		if (objp)
			goto out;
	}
	objp = ____cache_alloc(cache, flags);

	/*
	 * We may just have run out of memory on the local node.
	 * ____cache_alloc_node() knows how to locate memory on other nodes
	 */
3357 3358
	if (!objp)
		objp = ____cache_alloc_node(cache, flags, numa_mem_id());
3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373

  out:
	return objp;
}
#else

static __always_inline void *
__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	return ____cache_alloc(cachep, flags);
}

#endif /* CONFIG_NUMA */

static __always_inline void *
3374
slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
3375 3376 3377 3378
{
	unsigned long save_flags;
	void *objp;

3379
	flags &= gfp_allowed_mask;
3380

3381 3382
	lockdep_trace_alloc(flags);

A
Akinobu Mita 已提交
3383
	if (slab_should_failslab(cachep, flags))
3384 3385
		return NULL;

3386 3387
	cachep = memcg_kmem_get_cache(cachep, flags);

3388 3389 3390 3391 3392
	cache_alloc_debugcheck_before(cachep, flags);
	local_irq_save(save_flags);
	objp = __do_cache_alloc(cachep, flags);
	local_irq_restore(save_flags);
	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
	kmemleak_alloc_recursive(objp, cachep->object_size, 1, cachep->flags,
				 flags);
	prefetchw(objp);

	if (likely(objp)) {
		kmemcheck_slab_alloc(cachep, flags, objp, cachep->object_size);
		if (unlikely(flags & __GFP_ZERO))
			memset(objp, 0, cachep->object_size);
	}

	return objp;
}

/*
 * Caller needs to acquire correct kmem_cache_node's list_lock
 */
static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
		       int node)
{
	int i;
	struct kmem_cache_node *n;

	for (i = 0; i < nr_objects; i++) {
		void *objp;
		struct page *page;

		clear_obj_pfmemalloc(&objpp[i]);
		objp = objpp[i];

		page = virt_to_head_page(objp);
		n = cachep->node[node];
		list_del(&page->lru);
		check_spinlock_acquired_node(cachep, node);
		slab_put_obj(cachep, page, objp, node);
		STATS_DEC_ACTIVE(cachep);
		n->free_objects++;

		/* fixup slab chains */
		if (page->active == 0) {
			if (n->free_objects > n->free_limit) {
				n->free_objects -= cachep->num;
				/* No need to drop any previously held
				 * lock here, even if we have an off-slab slab
				 * descriptor it is guaranteed to come from
				 * a different cache, refer to comments before
				 * alloc_slabmgmt.
				 */
				slab_destroy(cachep, page);
			} else {
				list_add(&page->lru, &n->slabs_free);
			}
		} else {
			/* Unconditionally move a slab to the end of the
			 * partial list on free - maximum time for the
			 * other objects to be freed, too.
			 */
			list_add_tail(&page->lru, &n->slabs_partial);
		}
	}
}

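/*
 * Called when the per-cpu array overflows: move 'batchcount' objects to the
 * node's shared array when there is room, otherwise hand them back to the
 * slabs via free_block().
 */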
static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
{
	int batchcount;
	struct kmem_cache_node *n;
	int node = numa_mem_id();

	batchcount = ac->batchcount;
#if DEBUG
	BUG_ON(!batchcount || batchcount > ac->avail);
#endif
	check_irq_off();
	n = cachep->node[node];
	spin_lock(&n->list_lock);
	if (n->shared) {
		struct array_cache *shared_array = n->shared;
		int max = shared_array->limit - shared_array->avail;
		if (max) {
			if (batchcount > max)
				batchcount = max;
			memcpy(&(shared_array->entry[shared_array->avail]),
			       ac->entry, sizeof(void *) * batchcount);
			shared_array->avail += batchcount;
			goto free_done;
		}
	}

	free_block(cachep, ac->entry, batchcount, node);
free_done:
#if STATS
	{
		int i = 0;
		struct list_head *p;

		p = n->slabs_free.next;
		while (p != &(n->slabs_free)) {
			struct page *page;

			page = list_entry(p, struct page, lru);
			BUG_ON(page->active);

			i++;
			p = p->next;
		}
		STATS_SET_FREEABLE(cachep, i);
	}
#endif
	spin_unlock(&n->list_lock);
	ac->avail -= batchcount;
	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
}

/*
 * Release an obj back to its cache. If the obj has a constructed state, it must
 * be in this state _before_ it is released.  Called with disabled ints.
 */
static inline void __cache_free(struct kmem_cache *cachep, void *objp,
				unsigned long caller)
{
	struct array_cache *ac = cpu_cache_get(cachep);

	check_irq_off();
	kmemleak_free_recursive(objp, cachep->flags);
	objp = cache_free_debugcheck(cachep, objp, caller);

	kmemcheck_slab_free(cachep, objp, cachep->object_size);

	/*
	 * Skip calling cache_free_alien() when the platform is not NUMA.
	 * This avoids the cache misses taken while accessing the slab's
	 * struct page (a per-page memory reference) to get the nodeid.
	 * Instead use a global variable to skip the call, which is most
	 * likely to be present in the cache.
	 */
	if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
		return;

	if (likely(ac->avail < ac->limit)) {
		STATS_INC_FREEHIT(cachep);
	} else {
		STATS_INC_FREEMISS(cachep);
		cache_flusharray(cachep, ac);
	}

	ac_put_obj(cachep, ac, objp);
}

/**
 * kmem_cache_alloc - Allocate an object
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 *
 * Allocate an object from this cache.  The flags are only relevant
 * if the cache has no available objects.
 */
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	void *ret = slab_alloc(cachep, flags, _RET_IP_);

	trace_kmem_cache_alloc(_RET_IP_, ret,
			       cachep->object_size, cachep->size, flags);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc);
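/*
 * Illustrative usage (not part of the original file; 'struct foo' and
 * 'foo_cache' are made-up names):
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */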

#ifdef CONFIG_TRACING
void *
kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
	void *ret;

	ret = slab_alloc(cachep, flags, _RET_IP_);

	trace_kmalloc(_RET_IP_, ret,
		      size, cachep->size, flags);
	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_trace);
#endif

#ifdef CONFIG_NUMA
/**
 * kmem_cache_alloc_node - Allocate an object on the specified node
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 * @nodeid: node number of the target node.
 *
 * Identical to kmem_cache_alloc but it will allocate memory on the given
 * node, which can improve the performance for cpu bound structures.
 *
 * Fallback to other node is possible if __GFP_THISNODE is not set.
 */
void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);

	trace_kmem_cache_alloc_node(_RET_IP_, ret,
				    cachep->object_size, cachep->size,
				    flags, nodeid);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
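/*
 * Illustrative usage (not part of the original file): allocate close to the
 * CPU that will touch the object, e.g.
 *
 *	objp = kmem_cache_alloc_node(cachep, GFP_KERNEL, cpu_to_node(cpu));
 *
 * Passing NUMA_NO_NODE as @nodeid behaves like kmem_cache_alloc().
 */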

#ifdef CONFIG_TRACING
void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
				  gfp_t flags,
				  int nodeid,
				  size_t size)
{
	void *ret;

	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);

	trace_kmalloc_node(_RET_IP_, ret,
			   size, cachep->size,
			   flags, nodeid);
	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
#endif

static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
{
	struct kmem_cache *cachep;

	cachep = kmalloc_slab(size, flags);
	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
		return cachep;
	return kmem_cache_alloc_node_trace(cachep, flags, node, size);
}

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __do_kmalloc_node(size, flags, node, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc_node);

void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
		int node, unsigned long caller)
{
	return __do_kmalloc_node(size, flags, node, caller);
}
EXPORT_SYMBOL(__kmalloc_node_track_caller);
#else
void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __do_kmalloc_node(size, flags, node, 0);
}
EXPORT_SYMBOL(__kmalloc_node);
#endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */
#endif /* CONFIG_NUMA */

/**
 * __do_kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @caller: function caller for debug tracking of the caller
 */
static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
					  unsigned long caller)
{
	struct kmem_cache *cachep;
	void *ret;

	cachep = kmalloc_slab(size, flags);
	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
		return cachep;
	ret = slab_alloc(cachep, flags, caller);

	trace_kmalloc(caller, ret,
		      size, cachep->size, flags);

	return ret;
}


#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
void *__kmalloc(size_t size, gfp_t flags)
{
	return __do_kmalloc(size, flags, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc);

void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
{
	return __do_kmalloc(size, flags, caller);
}
EXPORT_SYMBOL(__kmalloc_track_caller);

#else
void *__kmalloc(size_t size, gfp_t flags)
{
	return __do_kmalloc(size, flags, 0);
}
EXPORT_SYMBOL(__kmalloc);
#endif

/**
 * kmem_cache_free - Deallocate an object
 * @cachep: The cache the allocation was from.
 * @objp: The previously allocated object.
 *
 * Free an object which was previously allocated from this
 * cache.
 */
void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	unsigned long flags;
	cachep = cache_from_obj(cachep, objp);
	if (!cachep)
		return;

	local_irq_save(flags);
	debug_check_no_locks_freed(objp, cachep->object_size);
	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
		debug_check_no_obj_freed(objp, cachep->object_size);
	__cache_free(cachep, objp, _RET_IP_);
	local_irq_restore(flags);

	trace_kmem_cache_free(_RET_IP_, objp);
}
EXPORT_SYMBOL(kmem_cache_free);

/**
 * kfree - free previously allocated memory
 * @objp: pointer returned by kmalloc.
 *
 * If @objp is NULL, no operation is performed.
 *
 * Don't free memory not originally allocated by kmalloc()
 * or you will run into trouble.
 */
void kfree(const void *objp)
{
3731
	struct kmem_cache *c;
	unsigned long flags;

	trace_kfree(_RET_IP_, objp);

	if (unlikely(ZERO_OR_NULL_PTR(objp)))
		return;
	local_irq_save(flags);
	kfree_debugcheck(objp);
	c = virt_to_cache(objp);
	debug_check_no_locks_freed(objp, c->object_size);

	debug_check_no_obj_freed(objp, c->object_size);
	__cache_free(c, (void *)objp, _RET_IP_);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(kfree);
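/*
 * Illustrative usage (not part of the original file): kmalloc()/kfree() pair
 * up directly, and ksize() reports the usable size of the rounded-up block:
 *
 *	buf = kmalloc(60, GFP_KERNEL);
 *	if (buf)
 *		pr_debug("got %zu usable bytes\n", ksize(buf));
 *	kfree(buf);	(kfree(NULL) is simply a no-op)
 */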

/*
 * This initializes kmem_cache_node or resizes various caches for all nodes.
 */
static int alloc_kmem_cache_node(struct kmem_cache *cachep, gfp_t gfp)
{
	int node;
	struct kmem_cache_node *n;
	struct array_cache *new_shared;
	struct array_cache **new_alien = NULL;

	for_each_online_node(node) {

		if (use_alien_caches) {
			new_alien = alloc_alien_cache(node, cachep->limit, gfp);
			if (!new_alien)
				goto fail;
		}

		new_shared = NULL;
		if (cachep->shared) {
			new_shared = alloc_arraycache(node,
				cachep->shared*cachep->batchcount,
					0xbaadf00d, gfp);
			if (!new_shared) {
				free_alien_cache(new_alien);
				goto fail;
			}
		}

		n = cachep->node[node];
		if (n) {
			struct array_cache *shared = n->shared;

			spin_lock_irq(&n->list_lock);

			if (shared)
				free_block(cachep, shared->entry,
						shared->avail, node);

			n->shared = new_shared;
			if (!n->alien) {
				n->alien = new_alien;
				new_alien = NULL;
			}
			n->free_limit = (1 + nr_cpus_node(node)) *
					cachep->batchcount + cachep->num;
			spin_unlock_irq(&n->list_lock);
			kfree(shared);
			free_alien_cache(new_alien);
			continue;
		}
		n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
		if (!n) {
			free_alien_cache(new_alien);
			kfree(new_shared);
			goto fail;
		}

		kmem_cache_node_init(n);
		n->next_reap = jiffies + REAPTIMEOUT_NODE +
				((unsigned long)cachep) % REAPTIMEOUT_NODE;
		n->shared = new_shared;
		n->alien = new_alien;
		n->free_limit = (1 + nr_cpus_node(node)) *
					cachep->batchcount + cachep->num;
		cachep->node[node] = n;
	}
	return 0;

fail:
	if (!cachep->list.next) {
		/* Cache is not active yet. Roll back what we did */
		node--;
		while (node >= 0) {
			if (cachep->node[node]) {
				n = cachep->node[node];

				kfree(n->shared);
				free_alien_cache(n->alien);
				kfree(n);
				cachep->node[node] = NULL;
			}
			node--;
		}
	}
	return -ENOMEM;
}

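/*
 * Helper used by __do_tune_cpucache(): 'new' carries one replacement
 * array_cache per cpu, and do_ccupdate_local() swaps it with the cpu's
 * current array from on_each_cpu() context so the exchange is race-free.
 */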
struct ccupdate_struct {
	struct kmem_cache *cachep;
	struct array_cache *new[0];
};

static void do_ccupdate_local(void *info)
{
	struct ccupdate_struct *new = info;
	struct array_cache *old;

	check_irq_off();
	old = cpu_cache_get(new->cachep);

	new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
	new->new[smp_processor_id()] = old;
}

/* Always called with the slab_mutex held */
static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
				int batchcount, int shared, gfp_t gfp)
{
	struct ccupdate_struct *new;
	int i;

	new = kzalloc(sizeof(*new) + nr_cpu_ids * sizeof(struct array_cache *),
		      gfp);
	if (!new)
		return -ENOMEM;

	for_each_online_cpu(i) {
		new->new[i] = alloc_arraycache(cpu_to_mem(i), limit,
						batchcount, gfp);
		if (!new->new[i]) {
			for (i--; i >= 0; i--)
				kfree(new->new[i]);
			kfree(new);
			return -ENOMEM;
		}
	}
	new->cachep = cachep;

	on_each_cpu(do_ccupdate_local, (void *)new, 1);

	check_irq_on();
	cachep->batchcount = batchcount;
	cachep->limit = limit;
	cachep->shared = shared;

	for_each_online_cpu(i) {
		struct array_cache *ccold = new->new[i];
		if (!ccold)
			continue;
		spin_lock_irq(&cachep->node[cpu_to_mem(i)]->list_lock);
		free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i));
		spin_unlock_irq(&cachep->node[cpu_to_mem(i)]->list_lock);
		kfree(ccold);
	}
	kfree(new);
	return alloc_kmem_cache_node(cachep, gfp);
}

static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
				int batchcount, int shared, gfp_t gfp)
{
	int ret;
	struct kmem_cache *c = NULL;
	int i = 0;

	ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);

	if (slab_state < FULL)
		return ret;

	if ((ret < 0) || !is_root_cache(cachep))
		return ret;

	VM_BUG_ON(!mutex_is_locked(&slab_mutex));
	for_each_memcg_cache_index(i) {
		c = cache_from_memcg_idx(cachep, i);
		if (c)
			/* return value determined by the parent cache only */
			__do_tune_cpucache(c, limit, batchcount, shared, gfp);
	}

	return ret;
}

/* Called with slab_mutex held always */
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
{
	int err;
	int limit = 0;
	int shared = 0;
	int batchcount = 0;

	if (!is_root_cache(cachep)) {
		struct kmem_cache *root = memcg_root_cache(cachep);
		limit = root->limit;
		shared = root->shared;
		batchcount = root->batchcount;
	}
	if (limit && shared && batchcount)
		goto skip_setup;
	/*
	 * The head array serves three purposes:
	 * - create a LIFO ordering, i.e. return objects that are cache-warm
	 * - reduce the number of spinlock operations.
	 * - reduce the number of linked list operations on the slab and
	 *   bufctl chains: array operations are cheaper.
	 * The numbers are guessed, we should auto-tune as described by
	 * Bonwick.
	 */
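	/*
	 * Worked example of the heuristic below: a 192 byte object falls in
	 * the "<= 256" bucket, so limit = 120, batchcount = (120 + 1) / 2 = 60
	 * and, on SMP, shared = 8.
	 */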
	if (cachep->size > 131072)
		limit = 1;
	else if (cachep->size > PAGE_SIZE)
		limit = 8;
	else if (cachep->size > 1024)
		limit = 24;
	else if (cachep->size > 256)
		limit = 54;
	else
		limit = 120;

	/*
	 * CPU bound tasks (e.g. network routing) can exhibit cpu bound
	 * allocation behaviour: Most allocs on one cpu, most free operations
	 * on another cpu. For these cases, an efficient object passing between
	 * cpus is necessary. This is provided by a shared array. The array
	 * replaces Bonwick's magazine layer.
	 * On uniprocessor, it's functionally equivalent (but less efficient)
	 * to a larger limit. Thus disabled by default.
	 */
	shared = 0;
	if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
		shared = 8;

#if DEBUG
	/*
	 * With debugging enabled, a large batchcount leads to excessively long
	 * periods with disabled local interrupts. Limit the batchcount.
	 */
	if (limit > 32)
		limit = 32;
#endif
	batchcount = (limit + 1) / 2;
skip_setup:
	err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
	if (err)
		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
		       cachep->name, -err);
	return err;
}

/*
 * Drain an array if it contains any elements, taking the node lock only if
 * necessary. Note that the node listlock also protects the array_cache
 * if drain_array() is used on the shared array.
 */
static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
			 struct array_cache *ac, int force, int node)
{
	int tofree;

	if (!ac || !ac->avail)
		return;
	if (ac->touched && !force) {
		ac->touched = 0;
	} else {
		spin_lock_irq(&n->list_lock);
		if (ac->avail) {
			tofree = force ? ac->avail : (ac->limit + 4) / 5;
			if (tofree > ac->avail)
				tofree = (ac->avail + 1) / 2;
			free_block(cachep, ac->entry, tofree, node);
			ac->avail -= tofree;
			memmove(ac->entry, &(ac->entry[tofree]),
				sizeof(void *) * ac->avail);
		}
		spin_unlock_irq(&n->list_lock);
	}
}

/**
 * cache_reap - Reclaim memory from caches.
 * @w: work descriptor
 *
 * Called from workqueue/eventd every few seconds.
 * Purpose:
 * - clear the per-cpu caches for this CPU.
 * - return freeable pages to the main free memory pool.
 *
 * If we cannot acquire the cache chain mutex then just give up - we'll try
 * again on the next iteration.
 */
static void cache_reap(struct work_struct *w)
{
	struct kmem_cache *searchp;
	struct kmem_cache_node *n;
	int node = numa_mem_id();
	struct delayed_work *work = to_delayed_work(w);

	if (!mutex_trylock(&slab_mutex))
		/* Give up. Setup the next iteration. */
		goto out;

	list_for_each_entry(searchp, &slab_caches, list) {
		check_irq_on();

		/*
		 * We only take the node lock if absolutely necessary and we
		 * have established with reasonable certainty that
		 * we can do some work if the lock was obtained.
		 */
		n = searchp->node[node];

		reap_alien(searchp, n);

		drain_array(searchp, n, cpu_cache_get(searchp), 0, node);

		/*
		 * These are racy checks but it does not matter
		 * if we skip one check or scan twice.
		 */
		if (time_after(n->next_reap, jiffies))
			goto next;

		n->next_reap = jiffies + REAPTIMEOUT_NODE;

		drain_array(searchp, n, n->shared, 0, node);

		if (n->free_touched)
			n->free_touched = 0;
		else {
			int freed;

			freed = drain_freelist(searchp, n, (n->free_limit +
				5 * searchp->num - 1) / (5 * searchp->num));
			STATS_ADD_REAPED(searchp, freed);
		}
next:
		cond_resched();
	}
	check_irq_on();
	mutex_unlock(&slab_mutex);
	next_reap_node();
out:
	/* Set up the next iteration */
	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
}

#ifdef CONFIG_SLABINFO
void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
{
	struct page *page;
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs = 0;
	unsigned long num_slabs, free_objects = 0, shared_avail = 0;
	const char *name;
	char *error = NULL;
	int node;
	struct kmem_cache_node *n;

	active_objs = 0;
	num_slabs = 0;
	for_each_online_node(node) {
		n = cachep->node[node];
		if (!n)
			continue;

		check_irq_on();
		spin_lock_irq(&n->list_lock);

		list_for_each_entry(page, &n->slabs_full, lru) {
			if (page->active != cachep->num && !error)
				error = "slabs_full accounting error";
			active_objs += cachep->num;
			active_slabs++;
		}
		list_for_each_entry(page, &n->slabs_partial, lru) {
			if (page->active == cachep->num && !error)
				error = "slabs_partial accounting error";
			if (!page->active && !error)
				error = "slabs_partial accounting error";
			active_objs += page->active;
			active_slabs++;
		}
		list_for_each_entry(page, &n->slabs_free, lru) {
			if (page->active && !error)
				error = "slabs_free accounting error";
			num_slabs++;
		}
		free_objects += n->free_objects;
		if (n->shared)
			shared_avail += n->shared->avail;

		spin_unlock_irq(&n->list_lock);
	}
	num_slabs += active_slabs;
	num_objs = num_slabs * cachep->num;
	if (num_objs - active_objs != free_objects && !error)
		error = "free_objects accounting error";

	name = cachep->name;
	if (error)
		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);

	sinfo->active_objs = active_objs;
	sinfo->num_objs = num_objs;
	sinfo->active_slabs = active_slabs;
	sinfo->num_slabs = num_slabs;
	sinfo->shared_avail = shared_avail;
	sinfo->limit = cachep->limit;
	sinfo->batchcount = cachep->batchcount;
	sinfo->shared = cachep->shared;
	sinfo->objects_per_slab = cachep->num;
	sinfo->cache_order = cachep->gfporder;
}

void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
{
#if STATS
	{			/* node stats */
		unsigned long high = cachep->high_mark;
		unsigned long allocs = cachep->num_allocations;
		unsigned long grown = cachep->grown;
		unsigned long reaped = cachep->reaped;
		unsigned long errors = cachep->errors;
		unsigned long max_freeable = cachep->max_freeable;
		unsigned long node_allocs = cachep->node_allocs;
		unsigned long node_frees = cachep->node_frees;
		unsigned long overflows = cachep->node_overflow;

		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
			   "%4lu %4lu %4lu %4lu %4lu",
			   allocs, high, grown,
			   reaped, errors, max_freeable, node_allocs,
			   node_frees, overflows);
	}
	/* cpu stats */
	{
		unsigned long allochit = atomic_read(&cachep->allochit);
		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
		unsigned long freehit = atomic_read(&cachep->freehit);
		unsigned long freemiss = atomic_read(&cachep->freemiss);

		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
			   allochit, allocmiss, freehit, freemiss);
	}
#endif
}

#define MAX_SLABINFO_WRITE 128
/**
 * slabinfo_write - Tuning for the slab allocator
 * @file: unused
 * @buffer: user buffer
 * @count: data length
 * @ppos: unused
 */
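/*
 * Illustrative input format (not part of the original file): the buffer is
 * parsed as "<cache name> <limit> <batchcount> <shared>", e.g.
 *
 *	echo "dentry 128 64 8" > /proc/slabinfo
 */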
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos)
{
	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
	int limit, batchcount, shared, res;
	struct kmem_cache *cachep;

	if (count > MAX_SLABINFO_WRITE)
		return -EINVAL;
	if (copy_from_user(&kbuf, buffer, count))
		return -EFAULT;
	kbuf[MAX_SLABINFO_WRITE] = '\0';

	tmp = strchr(kbuf, ' ');
	if (!tmp)
		return -EINVAL;
	*tmp = '\0';
	tmp++;
	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
		return -EINVAL;

	/* Find the cache in the chain of caches. */
	mutex_lock(&slab_mutex);
	res = -EINVAL;
	list_for_each_entry(cachep, &slab_caches, list) {
		if (!strcmp(cachep->name, kbuf)) {
			if (limit < 1 || batchcount < 1 ||
					batchcount > limit || shared < 0) {
				res = 0;
			} else {
				res = do_tune_cpucache(cachep, limit,
						       batchcount, shared,
						       GFP_KERNEL);
			}
			break;
		}
	}
	mutex_unlock(&slab_mutex);
	if (res >= 0)
		res = count;
	return res;
}

#ifdef CONFIG_DEBUG_SLAB_LEAK

static void *leaks_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);
	return seq_list_start(&slab_caches, *pos);
}

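/*
 * Record caller address 'v' in the table 'n': n[0] holds the capacity,
 * n[1] the number of entries, followed by sorted (address, count) pairs.
 * Returns 0 once the table is full.
 */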
static inline int add_caller(unsigned long *n, unsigned long v)
{
	unsigned long *p;
	int l;
	if (!v)
		return 1;
	l = n[1];
	p = n + 2;
	while (l) {
		int i = l/2;
		unsigned long *q = p + 2 * i;
		if (*q == v) {
			q[1]++;
			return 1;
		}
		if (*q > v) {
			l = i;
		} else {
			p = q + 2;
			l -= i + 1;
		}
	}
	if (++n[1] == n[0])
		return 0;
	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
	p[0] = v;
	p[1] = 1;
	return 1;
}

static void handle_slab(unsigned long *n, struct kmem_cache *c,
						struct page *page)
{
	void *p;
	int i;

	if (n[0] == n[1])
		return;
	for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
		if (get_obj_status(page, i) != OBJECT_ACTIVE)
			continue;

		if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
			return;
	}
}

static void show_symbol(struct seq_file *m, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	unsigned long offset, size;
	char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];

	if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
		if (modname[0])
			seq_printf(m, " [%s]", modname);
		return;
	}
#endif
	seq_printf(m, "%p", (void *)address);
}

static int leaks_show(struct seq_file *m, void *p)
{
	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
	struct page *page;
	struct kmem_cache_node *n;
	const char *name;
	unsigned long *x = m->private;
	int node;
	int i;

	if (!(cachep->flags & SLAB_STORE_USER))
		return 0;
	if (!(cachep->flags & SLAB_RED_ZONE))
		return 0;

	/* OK, we can do it */

	x[1] = 0;

	for_each_online_node(node) {
		n = cachep->node[node];
		if (!n)
			continue;

		check_irq_on();
		spin_lock_irq(&n->list_lock);

		list_for_each_entry(page, &n->slabs_full, lru)
			handle_slab(x, cachep, page);
		list_for_each_entry(page, &n->slabs_partial, lru)
			handle_slab(x, cachep, page);
		spin_unlock_irq(&n->list_lock);
	}
	name = cachep->name;
	if (x[0] == x[1]) {
		/* Increase the buffer size */
		mutex_unlock(&slab_mutex);
		m->private = kzalloc(x[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
		if (!m->private) {
			/* Too bad, we are really out */
			m->private = x;
			mutex_lock(&slab_mutex);
			return -ENOMEM;
		}
		*(unsigned long *)m->private = x[0] * 2;
		kfree(x);
		mutex_lock(&slab_mutex);
		/* Now make sure this entry will be retried */
		m->count = m->size;
		return 0;
	}
	for (i = 0; i < x[1]; i++) {
		seq_printf(m, "%s: %lu ", name, x[2*i+3]);
		show_symbol(m, x[2*i+2]);
		seq_putc(m, '\n');
	}
	return 0;
}

static const struct seq_operations slabstats_op = {
	.start = leaks_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = leaks_show,
};

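/*
 * Seed the caller table with one page: the first word stores how many
 * (address, count) pairs fit; leaks_show() reallocates it when it fills up.
 */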
static int slabstats_open(struct inode *inode, struct file *file)
{
	unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL);
	int ret = -ENOMEM;
	if (n) {
		ret = seq_open(file, &slabstats_op);
		if (!ret) {
			struct seq_file *m = file->private_data;
			*n = PAGE_SIZE / (2 * sizeof(unsigned long));
			m->private = n;
			n = NULL;
		}
		kfree(n);
	}
	return ret;
}

static const struct file_operations proc_slabstats_operations = {
	.open		= slabstats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif

static int __init slab_proc_init(void)
{
#ifdef CONFIG_DEBUG_SLAB_LEAK
	proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
#endif
	return 0;
}
module_init(slab_proc_init);
#endif

/**
 * ksize - get the actual amount of memory allocated for a given object
 * @objp: Pointer to the object
 *
 * kmalloc may internally round up allocations and return more memory
 * than requested. ksize() can be used to determine the actual amount of
 * memory allocated. The caller may use this additional memory, even though
 * a smaller amount of memory was initially specified with the kmalloc call.
 * The caller must guarantee that objp points to a valid object previously
 * allocated with either kmalloc() or kmem_cache_alloc(). The object
 * must not be freed during the duration of the call.
 */
size_t ksize(const void *objp)
{
	BUG_ON(!objp);
	if (unlikely(objp == ZERO_SIZE_PTR))
		return 0;

	return virt_to_cache(objp)->object_size;
}
EXPORT_SYMBOL(ksize);