/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 * 	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means, that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs, or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with enabled local interrupts -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *  	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'slab_mutex'.
 *	The mutex is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
 */
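
/*
 * Example (added for illustration, not part of the original source):
 * typical use of the cache API described above.  "struct foo" and
 * foo_cache are made-up names; the calls themselves are the public
 * slab interface.
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */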

#include	<linux/slab.h>
#include	<linux/mm.h>
#include	<linux/poison.h>
#include	<linux/swap.h>
#include	<linux/cache.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
#include	<linux/cpuset.h>
#include	<linux/proc_fs.h>
#include	<linux/seq_file.h>
#include	<linux/notifier.h>
#include	<linux/kallsyms.h>
#include	<linux/cpu.h>
#include	<linux/sysctl.h>
#include	<linux/module.h>
#include	<linux/rcupdate.h>
#include	<linux/string.h>
#include	<linux/uaccess.h>
#include	<linux/nodemask.h>
#include	<linux/kmemleak.h>
#include	<linux/mempolicy.h>
#include	<linux/mutex.h>
#include	<linux/fault-inject.h>
#include	<linux/rtmutex.h>
#include	<linux/reciprocal_div.h>
#include	<linux/debugobjects.h>
#include	<linux/kmemcheck.h>
#include	<linux/memory.h>
#include	<linux/prefetch.h>

#include	<net/sock.h>

#include	<asm/cacheflush.h>
#include	<asm/tlbflush.h>
#include	<asm/page.h>

#include <trace/events/kmem.h>

#include	"internal.h"

#include	"slab.h"

/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)
#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

/*
 * true if a page was allocated from pfmemalloc reserves for network-based
 * swap
 */
static bool pfmemalloc_active __read_mostly;

/*
 * kmem_bufctl_t:
 *
 * Bufctl's are used for linking objs within a slab
 * linked offsets.
 *
 * This implementation relies on "struct page" for locating the cache &
 * slab an object belongs to.
 * This allows the bufctl structure to be small (one int), but limits
 * the number of objects a slab (not a cache) can contain when off-slab
 * bufctls are used. The limit is the size of the largest general cache
 * that does not use off-slab slabs.
 * For 32bit archs with 4 kB pages, this is 56.
 * This is not serious, as it is only for large objects, when it is unwise
 * to have too many per slab.
 * Note: This limit can be raised by introducing a general cache whose size
 * is less than 512 (PAGE_SIZE<<3), but greater than 256.
 */

typedef unsigned int kmem_bufctl_t;
#define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
#define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
#define	BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
#define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)

/*
 * struct slab
 *
 * Manages the objs in a slab. Placed either at the beginning of mem allocated
 * for a slab, or allocated from a general cache.
 * Slabs are chained into three lists: fully used, partial, fully free slabs.
 */
struct slab {
	struct {
		struct list_head list;
		void *s_mem;		/* including colour offset */
		unsigned int inuse;	/* num of objs active in slab */
		kmem_bufctl_t free;
	};
};
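
/*
 * Illustration (added, not part of the original source): the free objects
 * of a slab form a singly linked list threaded through a kmem_bufctl_t
 * array that sits right behind the descriptor (the slab_bufctl() helper
 * defined later in this file).  slab->free holds the index of the first
 * free object and each array entry holds the index of the next one,
 * terminated by BUFCTL_END.  A sketch of the walk:
 *
 *	kmem_bufctl_t *bufctl = (kmem_bufctl_t *)(slabp + 1);
 *	kmem_bufctl_t i;
 *
 *	for (i = slabp->free; i != BUFCTL_END; i = bufctl[i])
 *		;	(each visited index i is a free object)
 */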

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	spinlock_t lock;
	void *entry[];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 *
			 * Entries should not be directly dereferenced as
			 * entries belonging to slabs marked pfmemalloc will
			 * have the low bit set (SLAB_OBJ_PFMEMALLOC)
			 */
};
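
/*
 * Illustration (added, not part of the original source): entry[] is used
 * as a stack so that the most recently freed, and therefore cache-warm,
 * object is handed out first:
 *
 *	objp = ac->entry[--ac->avail];	(pop on allocation)
 *	ac->entry[ac->avail++] = objp;	(push on free)
 *
 * This is exactly what ac_get_obj() and ac_put_obj() below do in the
 * common, non-pfmemalloc case.
 */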

#define SLAB_OBJ_PFMEMALLOC	1
static inline bool is_obj_pfmemalloc(void *objp)
{
	return (unsigned long)objp & SLAB_OBJ_PFMEMALLOC;
}

static inline void set_obj_pfmemalloc(void **objp)
{
	*objp = (void *)((unsigned long)*objp | SLAB_OBJ_PFMEMALLOC);
	return;
}

static inline void clear_obj_pfmemalloc(void **objp)
{
	*objp = (void *)((unsigned long)*objp & ~SLAB_OBJ_PFMEMALLOC);
}
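
/*
 * Illustration (added, not part of the original source): slab objects are
 * at least word-aligned, so bit 0 of an object pointer is free to carry
 * the pfmemalloc tag:
 *
 *	void *objp = ...;		(low bit clear)
 *
 *	set_obj_pfmemalloc(&objp);	(objp now has bit 0 set)
 *	is_obj_pfmemalloc(objp);	(true)
 *	clear_obj_pfmemalloc(&objp);	(original pointer restored)
 */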

/*
 * bootstrap: The caches do not work without cpuarrays anymore, but the
 * cpuarrays are allocated from the generic caches...
 */
#define BOOT_CPUCACHE_ENTRIES	1
struct arraycache_init {
	struct array_cache cache;
	void *entries[BOOT_CPUCACHE_ENTRIES];
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_AC MAX_NUMNODES
#define	SIZE_NODE (2 * MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_cache_node *n, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node);
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
static void cache_reap(struct work_struct *unused);

static int slab_early_init = 1;

#define INDEX_AC kmalloc_index(sizeof(struct arraycache_init))
#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))

static void kmem_cache_node_init(struct kmem_cache_node *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&(cachep->node[nodeid]->slab), listp);	\
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

#define CFLGS_OFF_SLAB		(0x80000000UL)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps means less probability for unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_CPUC	(2*HZ)
#define REAPTIMEOUT_LIST3	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
#define	STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
#define	STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
#define	STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG

/*
 * memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 * 		the end of an object is aligned with the end of the real
 * 		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 * 		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->size - 1* BYTES_PER_WORD: last caller address
 *					[BYTES_PER_WORD long]
 */
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long long*) (objp + obj_offset(cachep) -
				      sizeof(unsigned long long));
}

static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long long *)(objp + cachep->size -
					      sizeof(unsigned long long) -
					      REDZONE_ALIGN);
	return (unsigned long long *) (objp + cachep->size -
				       sizeof(unsigned long long));
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

/*
 * Do not go above this order unless 0 objects fit into the slab or
 * overridden on the command line.
 */
#define	SLAB_MAX_ORDER_HI	1
#define	SLAB_MAX_ORDER_LO	0
static int slab_max_order = SLAB_MAX_ORDER_LO;
static bool slab_max_order_set __initdata;

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_head_page(obj);
	return page->slab_cache;
}

static inline struct slab *virt_to_slab(const void *obj)
{
	struct page *page = virt_to_head_page(obj);

	VM_BUG_ON(!PageSlab(page));
	return page->slab_page;
}

static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
				 unsigned int idx)
{
	return slab->s_mem + cache->size * idx;
}

/*
 * We want to avoid an expensive divide : (offset / cache->size)
 *   Using the fact that size is a constant for a particular cache,
 *   we can replace (offset / cache->size) by
 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, void *obj)
{
	u32 offset = (obj - slab->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
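
/*
 * Worked example (added for illustration; assumes the reciprocal_divide()
 * of this era from lib/reciprocal_div.c, i.e. (u32)(((u64)A * R) >> 32)
 * with R = reciprocal_value(size)):
 *
 * For a cache with size 256, reciprocal_value(256) == 0x01000000, so an
 * object at offset 1536 from s_mem yields
 *
 *	(1536ULL * 0x01000000) >> 32 == 6
 *
 * i.e. object index 6, with a multiply and a shift instead of a divide
 * on the hot path.
 */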

static struct arraycache_init initarray_generic =
    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };

/* internal cache of cache description objs */
static struct kmem_cache kmem_cache_boot = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.size = sizeof(struct kmem_cache),
	.name = "kmem_cache",
};

#define BAD_ALIEN_MAGIC 0x01020304ul

#ifdef CONFIG_LOCKDEP

/*
 * Slab sometimes uses the kmalloc slabs to store the slab headers
 * for other slabs "off slab".
 * The locking for this is tricky in that it nests within the locks
 * of all other slabs in a few places; to deal with this special
 * locking we put on-slab caches into a separate lock-class.
 *
 * We set lock class for alien array caches which are up during init.
 * The lock annotation will be lost if all cpus of a node go down and
 * then come back up during hotplug
 */
static struct lock_class_key on_slab_l3_key;
static struct lock_class_key on_slab_alc_key;

static struct lock_class_key debugobj_l3_key;
static struct lock_class_key debugobj_alc_key;

static void slab_set_lock_classes(struct kmem_cache *cachep,
		struct lock_class_key *l3_key, struct lock_class_key *alc_key,
		int q)
{
	struct array_cache **alc;
	struct kmem_cache_node *n;
	int r;

	n = cachep->node[q];
	if (!n)
		return;

	lockdep_set_class(&n->list_lock, l3_key);
	alc = n->alien;
	/*
	 * FIXME: This check for BAD_ALIEN_MAGIC
	 * should go away when common slab code is taught to
	 * work even without alien caches.
	 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
	 * for alloc_alien_cache.
	 */
	if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
		return;
	for_each_node(r) {
		if (alc[r])
			lockdep_set_class(&alc[r]->lock, alc_key);
	}
}

static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
{
	slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, node);
}

static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
{
	int node;

	for_each_online_node(node)
		slab_set_debugobj_lock_classes_node(cachep, node);
}

static void init_node_lock_keys(int q)
{
	int i;

	if (slab_state < UP)
		return;

	for (i = 1; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache_node *n;
		struct kmem_cache *cache = kmalloc_caches[i];

		if (!cache)
			continue;

		n = cache->node[q];
		if (!n || OFF_SLAB(cache))
			continue;

		slab_set_lock_classes(cache, &on_slab_l3_key,
				&on_slab_alc_key, q);
	}
}

static void on_slab_lock_classes_node(struct kmem_cache *cachep, int q)
{
	if (!cachep->node[q])
		return;

	slab_set_lock_classes(cachep, &on_slab_l3_key,
			&on_slab_alc_key, q);
}

static inline void on_slab_lock_classes(struct kmem_cache *cachep)
{
	int node;

	VM_BUG_ON(OFF_SLAB(cachep));
	for_each_node(node)
		on_slab_lock_classes_node(cachep, node);
}

static inline void init_lock_keys(void)
{
	int node;

	for_each_node(node)
		init_node_lock_keys(node);
}
#else
static void init_node_lock_keys(int q)
{
}

static inline void init_lock_keys(void)
{
}

static inline void on_slab_lock_classes(struct kmem_cache *cachep)
{
}

static inline void on_slab_lock_classes_node(struct kmem_cache *cachep, int node)
{
}

static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
{
}

static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
{
}
#endif

static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return cachep->array[smp_processor_id()];
}

static size_t slab_mgmt_size(size_t nr_objs, size_t align)
{
	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
}

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static void cache_estimate(unsigned long gfporder, size_t buffer_size,
			   size_t align, int flags, size_t *left_over,
			   unsigned int *num)
{
	int nr_objs;
	size_t mgmt_size;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - The struct slab
	 * - One kmem_bufctl_t for each object
	 * - Padding to respect alignment of @align
	 * - @buffer_size bytes for each object
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & CFLGS_OFF_SLAB) {
		mgmt_size = 0;
		nr_objs = slab_size / buffer_size;

		if (nr_objs > SLAB_LIMIT)
			nr_objs = SLAB_LIMIT;
	} else {
		/*
		 * Ignore padding for the initial guess. The padding
		 * is at most @align-1 bytes, and @buffer_size is at
		 * least @align. In the worst case, this result will
		 * be one greater than the number of objects that fit
		 * into the memory allocation when taking the padding
		 * into account.
		 */
		nr_objs = (slab_size - sizeof(struct slab)) /
			  (buffer_size + sizeof(kmem_bufctl_t));

		/*
		 * This calculated number will be either the right
		 * amount, or one greater than what we want.
		 */
		if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
		       > slab_size)
			nr_objs--;

		if (nr_objs > SLAB_LIMIT)
			nr_objs = SLAB_LIMIT;

		mgmt_size = slab_mgmt_size(nr_objs, align);
	}
	*num = nr_objs;
	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
}
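
/*
 * Worked example (added for illustration; the exact numbers are
 * arch-dependent -- this assumes a 64-bit build where struct slab is
 * 32 bytes and kmem_bufctl_t is 4 bytes):
 *
 * On-slab management, gfporder = 0 (slab_size = 4096), buffer_size = 128,
 * align = 64:
 *
 *	initial guess:  (4096 - 32) / (128 + 4)  = 30 objects
 *	mgmt_size:      ALIGN(32 + 30 * 4, 64)   = 192 bytes
 *	check:          192 + 30 * 128 = 4032 <= 4096, so the guess stands
 *	left_over:      4096 - 3840 - 192        = 64 bytes
 *
 * The left-over bytes are what the colouring code spreads across slabs
 * as colour offsets.
 */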

#if DEBUG
#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
{
	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
#endif

/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line.
 */

static int use_alien_caches __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
	use_alien_caches = 0;
	return 1;
}
__setup("noaliencache", noaliencache_setup);

static int __init slab_max_order_setup(char *str)
{
	get_option(&str, &slab_max_order);
	slab_max_order = slab_max_order < 0 ? 0 :
				min(slab_max_order, MAX_ORDER - 1);
	slab_max_order_set = true;

	return 1;
}
__setup("slab_max_order=", slab_max_order_setup);

#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, slab_reap_node);

static void init_reap_node(int cpu)
{
	int node;

	node = next_node(cpu_to_mem(cpu), node_online_map);
	if (node == MAX_NUMNODES)
		node = first_node(node_online_map);

	per_cpu(slab_reap_node, cpu) = node;
}

static void next_reap_node(void)
{
	int node = __this_cpu_read(slab_reap_node);

	node = next_node(node, node_online_map);
	if (unlikely(node >= MAX_NUMNODES))
		node = first_node(node_online_map);
	__this_cpu_write(slab_reap_node, node);
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);

	/*
	 * When this gets called from do_initcalls via cpucache_init(),
	 * init_workqueues() has already run, so keventd will be setup
	 * at that time.
	 */
	if (keventd_up() && reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount, gfp_t gfp)
{
	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *nc = NULL;

	nc = kmalloc_node(memsize, gfp, node);
	/*
	 * The array_cache structures contain pointers to free objects.
	 * However, when such objects are allocated or transferred to another
	 * cache the pointers are not cleared and they could be counted as
	 * valid references during a kmemleak scan. Therefore, kmemleak must
	 * not scan such objects.
	 */
	kmemleak_no_scan(nc);
	if (nc) {
		nc->avail = 0;
		nc->limit = entries;
		nc->batchcount = batchcount;
		nc->touched = 0;
		spin_lock_init(&nc->lock);
	}
	return nc;
}

static inline bool is_slab_pfmemalloc(struct slab *slabp)
{
	struct page *page = virt_to_page(slabp->s_mem);

	return PageSlabPfmemalloc(page);
}

/* Clears pfmemalloc_active if no slabs have pfmemalloc set */
static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
						struct array_cache *ac)
{
	struct kmem_cache_node *n = cachep->node[numa_mem_id()];
	struct slab *slabp;
	unsigned long flags;

	if (!pfmemalloc_active)
		return;

	spin_lock_irqsave(&n->list_lock, flags);
	list_for_each_entry(slabp, &n->slabs_full, list)
		if (is_slab_pfmemalloc(slabp))
			goto out;

	list_for_each_entry(slabp, &n->slabs_partial, list)
		if (is_slab_pfmemalloc(slabp))
			goto out;

	list_for_each_entry(slabp, &n->slabs_free, list)
		if (is_slab_pfmemalloc(slabp))
			goto out;

	pfmemalloc_active = false;
out:
	spin_unlock_irqrestore(&n->list_lock, flags);
}

static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
						gfp_t flags, bool force_refill)
{
	int i;
	void *objp = ac->entry[--ac->avail];

	/* Ensure the caller is allowed to use objects from PFMEMALLOC slab */
	if (unlikely(is_obj_pfmemalloc(objp))) {
		struct kmem_cache_node *n;

		if (gfp_pfmemalloc_allowed(flags)) {
			clear_obj_pfmemalloc(&objp);
			return objp;
		}

		/* The caller cannot use PFMEMALLOC objects, find another one */
		for (i = 0; i < ac->avail; i++) {
			/* If a !PFMEMALLOC object is found, swap them */
			if (!is_obj_pfmemalloc(ac->entry[i])) {
				objp = ac->entry[i];
				ac->entry[i] = ac->entry[ac->avail];
				ac->entry[ac->avail] = objp;
				return objp;
			}
		}

		/*
		 * If there are empty slabs on the slabs_free list and we are
		 * being forced to refill the cache, mark this one !pfmemalloc.
		 */
		n = cachep->node[numa_mem_id()];
		if (!list_empty(&n->slabs_free) && force_refill) {
			struct slab *slabp = virt_to_slab(objp);
			ClearPageSlabPfmemalloc(virt_to_head_page(slabp->s_mem));
			clear_obj_pfmemalloc(&objp);
			recheck_pfmemalloc_active(cachep, ac);
			return objp;
		}

		/* No !PFMEMALLOC objects available */
		ac->avail++;
		objp = NULL;
	}

	return objp;
}

static inline void *ac_get_obj(struct kmem_cache *cachep,
			struct array_cache *ac, gfp_t flags, bool force_refill)
{
	void *objp;

	if (unlikely(sk_memalloc_socks()))
		objp = __ac_get_obj(cachep, ac, flags, force_refill);
	else
		objp = ac->entry[--ac->avail];

	return objp;
}

static void *__ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
								void *objp)
{
	if (unlikely(pfmemalloc_active)) {
		/* Some pfmemalloc slabs exist, check if this is one */
		struct slab *slabp = virt_to_slab(objp);
		struct page *page = virt_to_head_page(slabp->s_mem);
		if (PageSlabPfmemalloc(page))
			set_obj_pfmemalloc(&objp);
	}

	return objp;
}

static inline void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
								void *objp)
{
	if (unlikely(sk_memalloc_socks()))
		objp = __ac_put_obj(cachep, ac, objp);

	ac->entry[ac->avail++] = objp;
}

/*
 * Transfer objects from one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min3(from->avail, max, to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
			sizeof(void *) * nr);

	from->avail -= nr;
	to->avail += nr;
	return nr;
}
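
/*
 * Worked example (added for illustration): with from->avail == 10,
 * max == 16 and room for 4 more entries in the destination
 * (to->limit - to->avail == 4), min3() picks nr = 4, so the 4 topmost
 * (most recently freed) pointers of "from" are copied onto the top of
 * "to" in a single memcpy.
 */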

#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, n) do { } while (0)

static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
	return (struct array_cache **)BAD_ALIEN_MAGIC;
}

static inline void free_alien_cache(struct array_cache **ac_ptr)
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

static inline void *____cache_alloc_node(struct kmem_cache *cachep,
		 gfp_t flags, int nodeid)
{
	return NULL;
}

#else	/* CONFIG_NUMA */

static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
	struct array_cache **ac_ptr;
	int memsize = sizeof(void *) * nr_node_ids;
	int i;

	if (limit > 1)
		limit = 12;
	ac_ptr = kzalloc_node(memsize, gfp, node);
	if (ac_ptr) {
		for_each_node(i) {
			if (i == node || !node_online(i))
				continue;
			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
			if (!ac_ptr[i]) {
				for (i--; i >= 0; i--)
					kfree(ac_ptr[i]);
				kfree(ac_ptr);
				return NULL;
			}
		}
	}
	return ac_ptr;
}

static void free_alien_cache(struct array_cache **ac_ptr)
{
	int i;

	if (!ac_ptr)
		return;
	for_each_node(i)
	    kfree(ac_ptr[i]);
	kfree(ac_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node)
{
	struct kmem_cache_node *n = cachep->node[node];

	if (ac->avail) {
		spin_lock(&n->list_lock);
		/*
		 * Stuff objects into the remote nodes shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		if (n->shared)
			transfer_objects(n->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node);
		ac->avail = 0;
		spin_unlock(&n->list_lock);
	}
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
{
	int node = __this_cpu_read(slab_reap_node);

	if (n->alien) {
		struct array_cache *ac = n->alien[node];

		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
			__drain_alien_cache(cachep, ac, node);
			spin_unlock_irq(&ac->lock);
		}
	}
}

static void drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache **alien)
{
	int i = 0;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		ac = alien[i];
		if (ac) {
			spin_lock_irqsave(&ac->lock, flags);
			__drain_alien_cache(cachep, ac, i);
			spin_unlock_irqrestore(&ac->lock, flags);
		}
	}
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	int nodeid = page_to_nid(virt_to_page(objp));
	struct kmem_cache_node *n;
	struct array_cache *alien = NULL;
	int node;

	node = numa_mem_id();

	/*
	 * Make sure we are not freeing an object from another node to the array
	 * cache on this cpu.
	 */
	if (likely(nodeid == node))
		return 0;

	n = cachep->node[node];
	STATS_INC_NODEFREES(cachep);
	if (n->alien && n->alien[nodeid]) {
		alien = n->alien[nodeid];
		spin_lock(&alien->lock);
		if (unlikely(alien->avail == alien->limit)) {
			STATS_INC_ACOVERFLOW(cachep);
			__drain_alien_cache(cachep, alien, nodeid);
		}
		ac_put_obj(cachep, alien, objp);
		spin_unlock(&alien->lock);
	} else {
		spin_lock(&(cachep->node[nodeid])->list_lock);
		free_block(cachep, &objp, 1, nodeid);
		spin_unlock(&(cachep->node[nodeid])->list_lock);
	}
	return 1;
}
#endif

/*
 * Allocates and initializes a kmem_cache_node for a node on each slab cache,
 * used for either memory or cpu hotplug.  If memory is being hot-added, the
 * kmem_cache_node will be allocated off-node since memory is not yet online
 * for the new node.  When hotplugging memory or a cpu, existing nodes are
 * not replaced if already in use.
 *
 * Must hold slab_mutex.
 */
static int init_cache_node_node(int node)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *n;
	const int memsize = sizeof(struct kmem_cache_node);

	list_for_each_entry(cachep, &slab_caches, list) {
		/*
		 * Set up the kmem_cache_node for cpu before we can
		 * begin anything. Make sure some other cpu on this
		 * node has not already allocated this
		 */
		if (!cachep->node[node]) {
			n = kmalloc_node(memsize, GFP_KERNEL, node);
			if (!n)
				return -ENOMEM;
			kmem_cache_node_init(n);
			n->next_reap = jiffies + REAPTIMEOUT_LIST3 +
			    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;

			/*
			 * The kmem_cache_nodes don't come and go as CPUs
			 * come and go.  slab_mutex is sufficient
			 * protection here.
			 */
			cachep->node[node] = n;
		}

		spin_lock_irq(&cachep->node[node]->list_lock);
		cachep->node[node]->free_limit =
			(1 + nr_cpus_node(node)) *
			cachep->batchcount + cachep->num;
		spin_unlock_irq(&cachep->node[node]->list_lock);
	}
	return 0;
}

static inline int slabs_tofree(struct kmem_cache *cachep,
						struct kmem_cache_node *n)
{
	return (n->free_objects + cachep->num - 1) / cachep->num;
}
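
/*
 * Illustration (added, not part of the original source): this is a plain
 * ceiling division, giving an upper bound on the number of slabs that
 * draining all free objects could release.  With 17 free objects and 8
 * objects per slab:
 *
 *	(17 + 8 - 1) / 8 == 3
 */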

static void cpuup_canceled(long cpu)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *n = NULL;
	int node = cpu_to_mem(cpu);
	const struct cpumask *mask = cpumask_of_node(node);

	list_for_each_entry(cachep, &slab_caches, list) {
		struct array_cache *nc;
		struct array_cache *shared;
		struct array_cache **alien;

		/* cpu is dead; no one can alloc from it. */
		nc = cachep->array[cpu];
		cachep->array[cpu] = NULL;
		n = cachep->node[node];

		if (!n)
			goto free_array_cache;

		spin_lock_irq(&n->list_lock);

		/* Free limit for this kmem_cache_node */
		n->free_limit -= cachep->batchcount;
		if (nc)
			free_block(cachep, nc->entry, nc->avail, node);

		if (!cpumask_empty(mask)) {
			spin_unlock_irq(&n->list_lock);
			goto free_array_cache;
		}

		shared = n->shared;
		if (shared) {
			free_block(cachep, shared->entry,
				   shared->avail, node);
			n->shared = NULL;
		}

		alien = n->alien;
		n->alien = NULL;

		spin_unlock_irq(&n->list_lock);

		kfree(shared);
		if (alien) {
			drain_alien_cache(cachep, alien);
			free_alien_cache(alien);
		}
free_array_cache:
		kfree(nc);
	}
	/*
	 * In the previous loop, all the objects were freed to
	 * the respective cache's slabs, now we can go ahead and
	 * shrink each nodelist to its limit.
	 */
	list_for_each_entry(cachep, &slab_caches, list) {
		n = cachep->node[node];
		if (!n)
			continue;
		drain_freelist(cachep, n, slabs_tofree(cachep, n));
	}
}

static int cpuup_prepare(long cpu)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *n = NULL;
	int node = cpu_to_mem(cpu);
	int err;

	/*
	 * We need to do this right in the beginning since
	 * alloc_arraycache's are going to use this list.
	 * kmalloc_node allows us to add the slab to the right
	 * kmem_cache_node and not this cpu's kmem_cache_node
	 */
	err = init_cache_node_node(node);
	if (err < 0)
		goto bad;

	/*
	 * Now we can go ahead with allocating the shared arrays and
	 * array caches
	 */
	list_for_each_entry(cachep, &slab_caches, list) {
		struct array_cache *nc;
		struct array_cache *shared = NULL;
		struct array_cache **alien = NULL;

		nc = alloc_arraycache(node, cachep->limit,
					cachep->batchcount, GFP_KERNEL);
		if (!nc)
			goto bad;
		if (cachep->shared) {
			shared = alloc_arraycache(node,
				cachep->shared * cachep->batchcount,
				0xbaadf00d, GFP_KERNEL);
			if (!shared) {
				kfree(nc);
				goto bad;
			}
		}
		if (use_alien_caches) {
			alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL);
			if (!alien) {
				kfree(shared);
				kfree(nc);
				goto bad;
			}
		}
		cachep->array[cpu] = nc;
		n = cachep->node[node];
		BUG_ON(!n);

		spin_lock_irq(&n->list_lock);
		if (!n->shared) {
			/*
			 * We are serialised from CPU_DEAD or
			 * CPU_UP_CANCELLED by the cpucontrol lock
			 */
			n->shared = shared;
			shared = NULL;
		}
#ifdef CONFIG_NUMA
		if (!n->alien) {
			n->alien = alien;
			alien = NULL;
		}
#endif
		spin_unlock_irq(&n->list_lock);
		kfree(shared);
		free_alien_cache(alien);
		if (cachep->flags & SLAB_DEBUG_OBJECTS)
			slab_set_debugobj_lock_classes_node(cachep, node);
		else if (!OFF_SLAB(cachep) &&
			 !(cachep->flags & SLAB_DESTROY_BY_RCU))
			on_slab_lock_classes_node(cachep, node);
	}
	init_node_lock_keys(node);

	return 0;
bad:
	cpuup_canceled(cpu);
	return -ENOMEM;
}

static int cpuup_callback(struct notifier_block *nfb,
				    unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	int err = 0;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		mutex_lock(&slab_mutex);
		err = cpuup_prepare(cpu);
		mutex_unlock(&slab_mutex);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		start_cpu_timer(cpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		/*
		 * Shutdown cache reaper. Note that the slab_mutex is
		 * held so that if cache_reap() is invoked it cannot do
		 * anything expensive but will only modify reap_work
		 * and reschedule the timer.
		*/
		cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
		/* Now the cache_reaper is guaranteed to be not running. */
		per_cpu(slab_reap_work, cpu).work.func = NULL;
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		start_cpu_timer(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/*
		 * Even if all the cpus of a node are down, we don't free the
		 * kmem_cache_node of any cache. This is to avoid a race between
		 * cpu_down, and a kmalloc allocation from another cpu for
		 * memory from the node of the cpu going down.  The node
		 * structure is usually allocated from kmem_cache_create() and
		 * gets destroyed at kmem_cache_destroy().
		 */
		/* fall through */
#endif
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		mutex_lock(&slab_mutex);
		cpuup_canceled(cpu);
		mutex_unlock(&slab_mutex);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block cpucache_notifier = {
	&cpuup_callback, NULL, 0
};

#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
/*
 * Drains freelist for a node on each slab cache, used for memory hot-remove.
 * Returns -EBUSY if all objects cannot be drained so that the node is not
 * removed.
 *
 * Must hold slab_mutex.
 */
static int __meminit drain_cache_node_node(int node)
{
	struct kmem_cache *cachep;
	int ret = 0;

	list_for_each_entry(cachep, &slab_caches, list) {
		struct kmem_cache_node *n;

		n = cachep->node[node];
		if (!n)
			continue;

		drain_freelist(cachep, n, slabs_tofree(cachep, n));

		if (!list_empty(&n->slabs_full) ||
		    !list_empty(&n->slabs_partial)) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}

static int __meminit slab_memory_callback(struct notifier_block *self,
					unsigned long action, void *arg)
{
	struct memory_notify *mnb = arg;
	int ret = 0;
	int nid;

	nid = mnb->status_change_nid;
	if (nid < 0)
		goto out;

	switch (action) {
	case MEM_GOING_ONLINE:
		mutex_lock(&slab_mutex);
		ret = init_cache_node_node(nid);
		mutex_unlock(&slab_mutex);
		break;
	case MEM_GOING_OFFLINE:
		mutex_lock(&slab_mutex);
		ret = drain_cache_node_node(nid);
		mutex_unlock(&slab_mutex);
		break;
	case MEM_ONLINE:
	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}
out:
	return notifier_from_errno(ret);
}
#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */

/*
 * swap the static kmem_cache_node with kmalloced memory
 */
static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
				int nodeid)
{
	struct kmem_cache_node *ptr;

	ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
	BUG_ON(!ptr);

	memcpy(ptr, list, sizeof(struct kmem_cache_node));
	/*
	 * Do not assume that spinlocks can be initialized via memcpy:
	 */
	spin_lock_init(&ptr->list_lock);

	MAKE_ALL_LISTS(cachep, ptr, nodeid);
	cachep->node[nodeid] = ptr;
}

/*
 * For setting up all the kmem_cache_nodes of a cache whose buffer_size is
 * the same as the size of kmem_cache_node.
 */
static void __init set_up_node(struct kmem_cache *cachep, int index)
{
	int node;

	for_each_online_node(node) {
		cachep->node[node] = &init_kmem_cache_node[index + node];
		cachep->node[node]->next_reap = jiffies +
		    REAPTIMEOUT_LIST3 +
		    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
	}
}

/*
 * The memory after the last cpu cache pointer is used for the
 * node pointers.
 */
static void setup_node_pointer(struct kmem_cache *cachep)
{
	cachep->node = (struct kmem_cache_node **)&cachep->array[nr_cpu_ids];
}

/*
 * Initialisation.  Called after the page allocator has been initialised and
 * before smp_init().
 */
void __init kmem_cache_init(void)
{
	int i;

	BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
					sizeof(struct rcu_head));
	kmem_cache = &kmem_cache_boot;
	setup_node_pointer(kmem_cache);

	if (num_possible_nodes() == 1)
		use_alien_caches = 0;

	for (i = 0; i < NUM_INIT_LISTS; i++)
		kmem_cache_node_init(&init_kmem_cache_node[i]);

	set_up_node(kmem_cache, CACHE_CACHE);

	/*
	 * Fragmentation resistance on low memory - only use bigger
	 * page orders on machines with more than 32MB of memory if
	 * not overridden on the command line.
	 */
	if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
		slab_max_order = SLAB_MAX_ORDER_HI;

	/* Bootstrap is tricky, because several objects are allocated
	 * from caches that do not exist yet:
	 * 1) initialize the kmem_cache cache: it contains the struct
	 *    kmem_cache structures of all caches, except kmem_cache itself:
	 *    kmem_cache is statically allocated.
	 *    Initially an __init data area is used for the head array and the
	 *    kmem_cache_node structures; it's replaced with a kmalloc allocated
	 *    array at the end of the bootstrap.
	 * 2) Create the first kmalloc cache.
	 *    The struct kmem_cache for the new cache is allocated normally.
	 *    An __init data area is used for the head array.
	 * 3) Create the remaining kmalloc caches, with minimally sized
	 *    head arrays.
	 * 4) Replace the __init data head arrays for kmem_cache and the first
	 *    kmalloc cache with kmalloc allocated arrays.
	 * 5) Replace the __init data for kmem_cache_node for kmem_cache and
	 *    the other caches with kmalloc allocated memory.
	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
	 */

	/* 1) create the kmem_cache */

	/*
	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
	 */
	create_boot_cache(kmem_cache, "kmem_cache",
		offsetof(struct kmem_cache, array[nr_cpu_ids]) +
				  nr_node_ids * sizeof(struct kmem_cache_node *),
				  SLAB_HWCACHE_ALIGN);
	list_add(&kmem_cache->list, &slab_caches);

	/* 2+3) create the kmalloc caches */

	/*
	 * Initialize the caches that provide memory for the array cache and the
	 * kmem_cache_node structures first.  Without this, further allocations will
	 * bug.
	 */

	kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
					kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);

	if (INDEX_AC != INDEX_NODE)
		kmalloc_caches[INDEX_NODE] =
			create_kmalloc_cache("kmalloc-node",
				kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);

	slab_early_init = 0;

	/* 4) Replace the bootstrap head arrays */
	{
		struct array_cache *ptr;

		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);

		memcpy(ptr, cpu_cache_get(kmem_cache),
		       sizeof(struct arraycache_init));
		/*
		 * Do not assume that spinlocks can be initialized via memcpy:
		 */
		spin_lock_init(&ptr->lock);

		kmem_cache->array[smp_processor_id()] = ptr;

		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);

		BUG_ON(cpu_cache_get(kmalloc_caches[INDEX_AC])
		       != &initarray_generic.cache);
		memcpy(ptr, cpu_cache_get(kmalloc_caches[INDEX_AC]),
		       sizeof(struct arraycache_init));
		/*
		 * Do not assume that spinlocks can be initialized via memcpy:
		 */
		spin_lock_init(&ptr->lock);

		kmalloc_caches[INDEX_AC]->array[smp_processor_id()] = ptr;
	}
	/* 5) Replace the bootstrap kmem_cache_node */
	{
		int nid;

		for_each_online_node(nid) {
			init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);

			init_list(kmalloc_caches[INDEX_AC],
				  &init_kmem_cache_node[SIZE_AC + nid], nid);

			if (INDEX_AC != INDEX_NODE) {
				init_list(kmalloc_caches[INDEX_NODE],
					  &init_kmem_cache_node[SIZE_NODE + nid], nid);
			}
		}
	}

	create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
}

void __init kmem_cache_init_late(void)
{
	struct kmem_cache *cachep;

	slab_state = UP;

	/* 6) resize the head arrays to their final sizes */
	mutex_lock(&slab_mutex);
	list_for_each_entry(cachep, &slab_caches, list)
		if (enable_cpucache(cachep, GFP_NOWAIT))
			BUG();
	mutex_unlock(&slab_mutex);

	/* Annotate slab for lockdep -- annotate the malloc caches */
	init_lock_keys();

	/* Done! */
	slab_state = FULL;

	/*
	 * Register a cpu startup notifier callback that initializes
	 * cpu_cache_get for all new cpus
	 */
	register_cpu_notifier(&cpucache_notifier);

#ifdef CONFIG_NUMA
	/*
	 * Register a memory hotplug callback that initializes and frees
	 * node.
	 */
	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
#endif

	/*
	 * The reap timers are started later, with a module init call: That part
	 * of the kernel is not yet operational.
	 */
}

static int __init cpucache_init(void)
{
	int cpu;

	/*
	 * Register the timers that return unneeded pages to the page allocator
	 */
	for_each_online_cpu(cpu)
		start_cpu_timer(cpu);

	/* Done! */
	slab_state = FULL;
	return 0;
}
__initcall(cpucache_init);

1663 1664 1665
static noinline void
slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
{
1666
	struct kmem_cache_node *n;
1667 1668 1669 1670 1671 1672 1673 1674
	struct slab *slabp;
	unsigned long flags;
	int node;

	printk(KERN_WARNING
		"SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n",
		nodeid, gfpflags);
	printk(KERN_WARNING "  cache: %s, object size: %d, order: %d\n",
1675
		cachep->name, cachep->size, cachep->gfporder);
1676 1677 1678 1679 1680

	for_each_online_node(node) {
		unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
		unsigned long active_slabs = 0, num_slabs = 0;

1681 1682
		n = cachep->node[node];
		if (!n)
1683 1684
			continue;

1685 1686
		spin_lock_irqsave(&n->list_lock, flags);
		list_for_each_entry(slabp, &n->slabs_full, list) {
1687 1688 1689
			active_objs += cachep->num;
			active_slabs++;
		}
1690
		list_for_each_entry(slabp, &n->slabs_partial, list) {
1691 1692 1693
			active_objs += slabp->inuse;
			active_slabs++;
		}
1694
		list_for_each_entry(slabp, &n->slabs_free, list)
1695 1696
			num_slabs++;

1697 1698
		free_objects += n->free_objects;
		spin_unlock_irqrestore(&n->list_lock, flags);
1699 1700 1701 1702 1703 1704 1705 1706 1707 1708

		num_slabs += active_slabs;
		num_objs = num_slabs * cachep->num;
		printk(KERN_WARNING
			"  node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
			node, active_slabs, num_slabs, active_objs, num_objs,
			free_objects);
	}
}

L
Linus Torvalds 已提交
1709 1710 1711 1712 1713 1714 1715
/*
 * Interface to system's page allocator. No need to hold the cache-lock.
 *
 * If we requested dmaable memory, we will get it. Even if we
 * did not request dmaable memory, we might get it, but that
 * would be relatively rare and ignorable.
 */
static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
								int nodeid)
{
	struct page *page;
	int nr_pages;

	flags |= cachep->allocflags;
	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		flags |= __GFP_RECLAIMABLE;

	page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
	if (!page) {
		if (!(flags & __GFP_NOWARN) && printk_ratelimit())
			slab_out_of_memory(cachep, flags, nodeid);
		return NULL;
	}

	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
	if (unlikely(page->pfmemalloc))
		pfmemalloc_active = true;

	nr_pages = (1 << cachep->gfporder);
	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		add_zone_page_state(page_zone(page),
			NR_SLAB_RECLAIMABLE, nr_pages);
	else
		add_zone_page_state(page_zone(page),
			NR_SLAB_UNRECLAIMABLE, nr_pages);
	__SetPageSlab(page);
	if (page->pfmemalloc)
		SetPageSlabPfmemalloc(page);
	memcg_bind_pages(cachep, cachep->gfporder);

	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);

		if (cachep->ctor)
			kmemcheck_mark_uninitialized_pages(page, nr_pages);
		else
			kmemcheck_mark_unallocated_pages(page, nr_pages);
	}

	return page;
}

/*
 * Interface to system's page release.
 */
static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
{
	const unsigned long nr_freed = (1 << cachep->gfporder);

	kmemcheck_free_shadow(page, cachep->gfporder);

	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		sub_zone_page_state(page_zone(page),
				NR_SLAB_RECLAIMABLE, nr_freed);
	else
		sub_zone_page_state(page_zone(page),
				NR_SLAB_UNRECLAIMABLE, nr_freed);

	BUG_ON(!PageSlab(page));
	__ClearPageSlabPfmemalloc(page);
	__ClearPageSlab(page);

	memcg_release_pages(cachep, cachep->gfporder);
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += nr_freed;
	__free_memcg_kmem_pages(page, cachep->gfporder);
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct kmem_cache *cachep;
	struct page *page;

	page = container_of(head, struct page, rcu_head);
	cachep = page->slab_cache;

	kmem_freepages(cachep, page);
}

#if DEBUG

#ifdef CONFIG_DEBUG_PAGEALLOC
static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
			    unsigned long caller)
{
	int size = cachep->object_size;

	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];

	if (size < 5 * sizeof(unsigned long))
		return;

	*addr++ = 0x12345678;
	*addr++ = caller;
	*addr++ = smp_processor_id();
	size -= 3 * sizeof(unsigned long);
	{
		unsigned long *sptr = &caller;
		unsigned long svalue;

		while (!kstack_end(sptr)) {
			svalue = *sptr++;
			if (kernel_text_address(svalue)) {
				*addr++ = svalue;
				size -= sizeof(unsigned long);
				if (size <= sizeof(unsigned long))
					break;
			}
		}

	}
	*addr++ = 0x87654321;
}
#endif

static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
{
	int size = cachep->object_size;
	addr = &((char *)addr)[obj_offset(cachep)];

	memset(addr, val, size);
	*(unsigned char *)(addr + size - 1) = POISON_END;
}

static void dump_line(char *data, int offset, int limit)
{
	int i;
	unsigned char error = 0;
	int bad_count = 0;

	printk(KERN_ERR "%03x: ", offset);
	for (i = 0; i < limit; i++) {
		if (data[offset + i] != POISON_FREE) {
			error = data[offset + i];
			bad_count++;
		}
	}
	print_hex_dump(KERN_CONT, "", 0, 16, 1,
			&data[offset], limit, 1);

	if (bad_count == 1) {
		error ^= POISON_FREE;
		if (!(error & (error - 1))) {
			printk(KERN_ERR "Single bit error detected. Probably "
					"bad RAM.\n");
#ifdef CONFIG_X86
			printk(KERN_ERR "Run memtest86+ or a similar memory "
					"test tool.\n");
#else
			printk(KERN_ERR "Run a memory test tool.\n");
#endif
		}
	}
}
#endif

#if DEBUG

static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
{
	int i, size;
	char *realobj;

	if (cachep->flags & SLAB_RED_ZONE) {
		printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
			*dbg_redzone1(cachep, objp),
			*dbg_redzone2(cachep, objp));
	}

	if (cachep->flags & SLAB_STORE_USER) {
		printk(KERN_ERR "Last user: [<%p>](%pSR)\n",
		       *dbg_userword(cachep, objp),
		       *dbg_userword(cachep, objp));
	}
	realobj = (char *)objp + obj_offset(cachep);
	size = cachep->object_size;
	for (i = 0; i < size && lines; i += 16, lines--) {
		int limit;
		limit = 16;
		if (i + limit > size)
			limit = size - i;
		dump_line(realobj, i, limit);
	}
}

static void check_poison_obj(struct kmem_cache *cachep, void *objp)
{
	char *realobj;
	int size, i;
	int lines = 0;

	realobj = (char *)objp + obj_offset(cachep);
	size = cachep->object_size;

	for (i = 0; i < size; i++) {
		char exp = POISON_FREE;
		if (i == size - 1)
			exp = POISON_END;
		if (realobj[i] != exp) {
			int limit;
			/* Mismatch ! */
			/* Print header */
			if (lines == 0) {
				printk(KERN_ERR
					"Slab corruption (%s): %s start=%p, len=%d\n",
					print_tainted(), cachep->name, realobj, size);
				print_objinfo(cachep, objp, 0);
			}
			/* Hexdump the affected line */
			i = (i / 16) * 16;
			limit = 16;
			if (i + limit > size)
				limit = size - i;
			dump_line(realobj, i, limit);
			i += 16;
			lines++;
			/* Limit to 5 lines */
			if (lines > 5)
				break;
		}
	}
	if (lines != 0) {
		/* Print some data about the neighboring objects, if they
		 * exist:
		 */
		struct slab *slabp = virt_to_slab(objp);
		unsigned int objnr;

		objnr = obj_to_index(cachep, slabp, objp);
		if (objnr) {
			objp = index_to_obj(cachep, slabp, objnr - 1);
			realobj = (char *)objp + obj_offset(cachep);
			printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
			       realobj, size);
			print_objinfo(cachep, objp, 2);
		}
		if (objnr + 1 < cachep->num) {
			objp = index_to_obj(cachep, slabp, objnr + 1);
			realobj = (char *)objp + obj_offset(cachep);
			printk(KERN_ERR "Next obj: start=%p, len=%d\n",
			       realobj, size);
			print_objinfo(cachep, objp, 2);
		}
	}
}
#endif

#if DEBUG
static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
{
	int i;
	for (i = 0; i < cachep->num; i++) {
		void *objp = index_to_obj(cachep, slabp, i);

		if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
			if (cachep->size % PAGE_SIZE == 0 &&
					OFF_SLAB(cachep))
				kernel_map_pages(virt_to_page(objp),
					cachep->size / PAGE_SIZE, 1);
			else
				check_poison_obj(cachep, objp);
#else
			check_poison_obj(cachep, objp);
#endif
		}
		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "start of a freed object "
					   "was overwritten");
			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "end of a freed object "
					   "was overwritten");
		}
	}
}
#else
static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
{
}
#endif

/**
 * slab_destroy - destroy and release all objects in a slab
 * @cachep: cache pointer being destroyed
 * @slabp: slab pointer being destroyed
 *
 * Destroy all the objs in a slab, and release the mem back to the system.
 * Before calling the slab must have been unlinked from the cache.  The
 * cache-lock is not held/needed.
 */
static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
{
	struct page *page = virt_to_head_page(slabp->s_mem);

	slab_destroy_debugcheck(cachep, slabp);
	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
		struct rcu_head *head;

		/*
		 * RCU free overloads the RCU head over the LRU.
		 * slab_page has been overloaded over the LRU,
		 * however it is not used from now on so that
		 * we can use it safely.
		 */
		head = (void *)&page->rcu_head;
		call_rcu(head, kmem_rcu_free);

	} else {
		kmem_freepages(cachep, page);
	}

	/*
	 * From now on, we don't use slab management
	 * although actual page can be freed in rcu context
	 */
	if (OFF_SLAB(cachep))
		kmem_cache_free(cachep->slabp_cache, slabp);
}
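
/*
 * Aside (descriptive comment, not part of the original source):
 * SLAB_DESTROY_BY_RCU, handled above, only defers freeing of the slab's
 * pages until an RCU grace period has elapsed; individual objects may be
 * recycled immediately. Lock-free readers that chase pointers into such
 * a cache under rcu_read_lock() must therefore revalidate the object
 * (for example via a generation or reference count) after dereferencing.
 */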

/**
 * calculate_slab_order - calculate size (page order) of slabs
 * @cachep: pointer to the cache that is being created
 * @size: size of objects to be created in this cache.
 * @align: required alignment for the objects.
 * @flags: slab allocation flags
 *
 * Also calculates the number of objects per slab.
 *
 * This could be made much more intelligent.  For now, try to avoid using
 * high order pages for slabs.  When the gfp() functions are more friendly
 * towards high-order requests, this should be changed.
 */
static size_t calculate_slab_order(struct kmem_cache *cachep,
			size_t size, size_t align, unsigned long flags)
{
	unsigned long offslab_limit;
	size_t left_over = 0;
	int gfporder;

	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
		unsigned int num;
		size_t remainder;

		cache_estimate(gfporder, size, align, flags, &remainder, &num);
		if (!num)
			continue;

		if (flags & CFLGS_OFF_SLAB) {
			/*
			 * Max number of objs-per-slab for caches which
			 * use off-slab slabs. Needed to avoid a possible
			 * looping condition in cache_grow().
			 */
			offslab_limit = size - sizeof(struct slab);
			offslab_limit /= sizeof(kmem_bufctl_t);

			if (num > offslab_limit)
				break;
		}

		/* Found something acceptable - save it away */
		cachep->num = num;
		cachep->gfporder = gfporder;
		left_over = remainder;

		/*
		 * A VFS-reclaimable slab tends to have most allocations
		 * as GFP_NOFS and we really don't want to have to be allocating
		 * higher-order pages when we are unable to shrink dcache.
		 */
		if (flags & SLAB_RECLAIM_ACCOUNT)
			break;

		/*
		 * Large number of objects is good, but very large slabs are
		 * currently bad for the gfp()s.
		 */
		if (gfporder >= slab_max_order)
			break;

		/*
		 * Acceptable internal fragmentation?
		 */
		if (left_over * 8 <= (PAGE_SIZE << gfporder))
			break;
	}
	return left_over;
}
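
/*
 * Worked example for the fragmentation test above (illustrative numbers
 * only): with PAGE_SIZE == 4096 and gfporder == 0, an order is accepted
 * once left_over is at most 512 bytes, i.e. one eighth of the slab;
 * each additional order doubles both the slab size and the permitted
 * waste.
 */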

static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	if (slab_state >= FULL)
		return enable_cpucache(cachep, gfp);

	if (slab_state == DOWN) {
		/*
		 * Note: Creation of first cache (kmem_cache).
		 * The setup_node is taken care
		 * of by the caller of __kmem_cache_create
		 */
		cachep->array[smp_processor_id()] = &initarray_generic.cache;
		slab_state = PARTIAL;
	} else if (slab_state == PARTIAL) {
		/*
		 * Note: the second kmem_cache_create must create the cache
		 * that's used by kmalloc(24), otherwise the creation of
		 * further caches will BUG().
		 */
		cachep->array[smp_processor_id()] = &initarray_generic.cache;

		/*
		 * If the cache that's used by kmalloc(sizeof(kmem_cache_node)) is
		 * the second cache, then we need to set up all of its per-node
		 * structures, otherwise the creation of further caches will BUG().
		 */
		set_up_node(cachep, SIZE_AC);
		if (INDEX_AC == INDEX_NODE)
			slab_state = PARTIAL_NODE;
		else
			slab_state = PARTIAL_ARRAYCACHE;
	} else {
		/* Remaining boot caches */
		cachep->array[smp_processor_id()] =
			kmalloc(sizeof(struct arraycache_init), gfp);

		if (slab_state == PARTIAL_ARRAYCACHE) {
			set_up_node(cachep, SIZE_NODE);
			slab_state = PARTIAL_NODE;
		} else {
			int node;
			for_each_online_node(node) {
				cachep->node[node] =
				    kmalloc_node(sizeof(struct kmem_cache_node),
						gfp, node);
				BUG_ON(!cachep->node[node]);
				kmem_cache_node_init(cachep->node[node]);
			}
		}
	}
	cachep->node[numa_mem_id()]->next_reap =
			jiffies + REAPTIMEOUT_LIST3 +
			((unsigned long)cachep) % REAPTIMEOUT_LIST3;

	cpu_cache_get(cachep)->avail = 0;
	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
	cpu_cache_get(cachep)->batchcount = 1;
	cpu_cache_get(cachep)->touched = 0;
	cachep->batchcount = 1;
	cachep->limit = BOOT_CPUCACHE_ENTRIES;
	return 0;
}

/**
 * __kmem_cache_create - Create a cache.
 * @cachep: cache management descriptor
 * @flags: SLAB flags
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 */
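/*
 * Illustrative call via the public wrapper (hypothetical cache and
 * struct, not part of this file):
 *
 *	struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
 *				      0, SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cache)
 *		return -ENOMEM;
 */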
int
__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
{
	size_t left_over, slab_size, ralign;
	gfp_t gfp;
	int err;
	size_t size = cachep->size;

#if DEBUG
#if FORCED_DEBUG
	/*
	 * Enable redzoning and last user accounting, except for caches with
	 * large objects, if the increased size would increase the object size
	 * above the next power of two: caches with object sizes just above a
	 * power of two have a significant amount of internal fragmentation.
	 */
	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
						2 * sizeof(unsigned long long)))
		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
	if (!(flags & SLAB_DESTROY_BY_RCU))
		flags |= SLAB_POISON;
#endif
	if (flags & SLAB_DESTROY_BY_RCU)
		BUG_ON(flags & SLAB_POISON);
#endif

	/*
	 * Check that size is in terms of words.  This is needed to avoid
	 * unaligned accesses for some archs when redzoning is used, and makes
	 * sure any on-slab bufctl's are also correctly aligned.
	 */
	if (size & (BYTES_PER_WORD - 1)) {
		size += (BYTES_PER_WORD - 1);
		size &= ~(BYTES_PER_WORD - 1);
	}

	/*
	 * Redzoning and user store require word alignment or possibly larger.
	 * Note this will be overridden by architecture or caller mandated
	 * alignment if either is greater than BYTES_PER_WORD.
	 */
	if (flags & SLAB_STORE_USER)
		ralign = BYTES_PER_WORD;

	if (flags & SLAB_RED_ZONE) {
		ralign = REDZONE_ALIGN;
		/* If redzoning, ensure that the second redzone is suitably
		 * aligned, by adjusting the object size accordingly. */
		size += REDZONE_ALIGN - 1;
		size &= ~(REDZONE_ALIGN - 1);
	}

	/* 3) caller mandated alignment */
	if (ralign < cachep->align) {
		ralign = cachep->align;
	}
	/* disable debug if necessary */
	if (ralign > __alignof__(unsigned long long))
		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
	/*
	 * 4) Store it.
	 */
	cachep->align = ralign;

	if (slab_is_available())
		gfp = GFP_KERNEL;
	else
		gfp = GFP_NOWAIT;

	setup_node_pointer(cachep);
#if DEBUG

	/*
	 * Both debugging options require word-alignment which is calculated
	 * into align above.
	 */
	if (flags & SLAB_RED_ZONE) {
		/* add space for red zone words */
		cachep->obj_offset += sizeof(unsigned long long);
		size += 2 * sizeof(unsigned long long);
	}
	if (flags & SLAB_STORE_USER) {
		/* user store requires one word storage behind the end of
		 * the real object. But if the second red zone needs to be
		 * aligned to 64 bits, we must allow that much space.
		 */
		if (flags & SLAB_RED_ZONE)
			size += REDZONE_ALIGN;
		else
			size += BYTES_PER_WORD;
	}
#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
	if (size >= kmalloc_size(INDEX_NODE + 1)
	    && cachep->object_size > cache_line_size()
	    && ALIGN(size, cachep->align) < PAGE_SIZE) {
		cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
		size = PAGE_SIZE;
	}
#endif
#endif

	/*
	 * Determine if the slab management is 'on' or 'off' slab.
	 * (bootstrapping cannot cope with offslab caches so don't do
	 * it too early on. Always use on-slab management when
	 * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
	 */
	if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init &&
	    !(flags & SLAB_NOLEAKTRACE))
		/*
		 * Size is large, assume best to place the slab management obj
		 * off-slab (should allow better packing of objs).
		 */
		flags |= CFLGS_OFF_SLAB;

	size = ALIGN(size, cachep->align);

	left_over = calculate_slab_order(cachep, size, cachep->align, flags);

	if (!cachep->num)
		return -E2BIG;

	slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
			  + sizeof(struct slab), cachep->align);

	/*
	 * If the slab has been placed off-slab, and we have enough space then
	 * move it on-slab. This is at the expense of any extra colouring.
	 */
	if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
		flags &= ~CFLGS_OFF_SLAB;
		left_over -= slab_size;
	}

	if (flags & CFLGS_OFF_SLAB) {
		/* really off slab. No need for manual alignment */
		slab_size =
		    cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);

#ifdef CONFIG_PAGE_POISONING
		/* If we're going to use the generic kernel_map_pages()
		 * poisoning, then it's going to smash the contents of
		 * the redzone and userword anyhow, so switch them off.
		 */
		if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
			flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
#endif
	}

	cachep->colour_off = cache_line_size();
	/* Offset must be a multiple of the alignment. */
	if (cachep->colour_off < cachep->align)
		cachep->colour_off = cachep->align;
	cachep->colour = left_over / cachep->colour_off;
	cachep->slab_size = slab_size;
	cachep->flags = flags;
	cachep->allocflags = __GFP_COMP;
	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
		cachep->allocflags |= GFP_DMA;
	cachep->size = size;
	cachep->reciprocal_buffer_size = reciprocal_value(size);

	if (flags & CFLGS_OFF_SLAB) {
		cachep->slabp_cache = kmalloc_slab(slab_size, 0u);
		/*
		 * This is a possibility for one of the malloc_sizes caches.
		 * But since we go off slab only for object size greater than
		 * PAGE_SIZE/8, and malloc_sizes gets created in ascending order,
		 * this should not happen at all.
		 * But leave a BUG_ON for some lucky dude.
		 */
		BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
	}

	err = setup_cpu_cache(cachep, gfp);
	if (err) {
		__kmem_cache_shutdown(cachep);
		return err;
	}

	if (flags & SLAB_DEBUG_OBJECTS) {
		/*
		 * Would deadlock through slab_destroy()->call_rcu()->
		 * debug_object_activate()->kmem_cache_alloc().
		 */
		WARN_ON_ONCE(flags & SLAB_DESTROY_BY_RCU);

		slab_set_debugobj_lock_classes(cachep);
	} else if (!OFF_SLAB(cachep) && !(flags & SLAB_DESTROY_BY_RCU))
		on_slab_lock_classes(cachep);

	return 0;
}

#if DEBUG
static void check_irq_off(void)
{
	BUG_ON(!irqs_disabled());
}

static void check_irq_on(void)
{
	BUG_ON(irqs_disabled());
}

static void check_spinlock_acquired(struct kmem_cache *cachep)
{
#ifdef CONFIG_SMP
	check_irq_off();
	assert_spin_locked(&cachep->node[numa_mem_id()]->list_lock);
#endif
}

static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
{
#ifdef CONFIG_SMP
	check_irq_off();
	assert_spin_locked(&cachep->node[node]->list_lock);
#endif
}

#else
#define check_irq_off()	do { } while(0)
#define check_irq_on()	do { } while(0)
#define check_spinlock_acquired(x) do { } while(0)
#define check_spinlock_acquired_node(x, y) do { } while(0)
#endif

static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
			struct array_cache *ac,
			int force, int node);

static void do_drain(void *arg)
{
	struct kmem_cache *cachep = arg;
	struct array_cache *ac;
	int node = numa_mem_id();

	check_irq_off();
	ac = cpu_cache_get(cachep);
	spin_lock(&cachep->node[node]->list_lock);
	free_block(cachep, ac->entry, ac->avail, node);
	spin_unlock(&cachep->node[node]->list_lock);
	ac->avail = 0;
}

static void drain_cpu_caches(struct kmem_cache *cachep)
{
	struct kmem_cache_node *n;
	int node;

	on_each_cpu(do_drain, cachep, 1);
	check_irq_on();
	for_each_online_node(node) {
		n = cachep->node[node];
		if (n && n->alien)
			drain_alien_cache(cachep, n->alien);
	}

	for_each_online_node(node) {
		n = cachep->node[node];
		if (n)
			drain_array(cachep, n, n->shared, 1, node);
	}
}

/*
 * Remove slabs from the list of free slabs.
 * Specify the number of slabs to drain in tofree.
 *
 * Returns the actual number of slabs released.
 */
static int drain_freelist(struct kmem_cache *cache,
			struct kmem_cache_node *n, int tofree)
{
	struct list_head *p;
	int nr_freed;
	struct slab *slabp;

	nr_freed = 0;
	while (nr_freed < tofree && !list_empty(&n->slabs_free)) {

		spin_lock_irq(&n->list_lock);
		p = n->slabs_free.prev;
		if (p == &n->slabs_free) {
			spin_unlock_irq(&n->list_lock);
			goto out;
		}

		slabp = list_entry(p, struct slab, list);
#if DEBUG
		BUG_ON(slabp->inuse);
#endif
		list_del(&slabp->list);
		/*
		 * Safe to drop the lock. The slab is no longer linked
		 * to the cache.
		 */
		n->free_objects -= cache->num;
		spin_unlock_irq(&n->list_lock);
		slab_destroy(cache, slabp);
		nr_freed++;
	}
out:
	return nr_freed;
}

/* Called with slab_mutex held to protect against cpu hotplug */
static int __cache_shrink(struct kmem_cache *cachep)
{
	int ret = 0, i = 0;
	struct kmem_cache_node *n;

	drain_cpu_caches(cachep);

	check_irq_on();
	for_each_online_node(i) {
		n = cachep->node[i];
		if (!n)
			continue;

		drain_freelist(cachep, n, slabs_tofree(cachep, n));

		ret += !list_empty(&n->slabs_full) ||
			!list_empty(&n->slabs_partial);
	}
	return (ret ? 1 : 0);
}

/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	int ret;
	BUG_ON(!cachep || in_interrupt());

	get_online_cpus();
	mutex_lock(&slab_mutex);
	ret = __cache_shrink(cachep);
	mutex_unlock(&slab_mutex);
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL(kmem_cache_shrink);

int __kmem_cache_shutdown(struct kmem_cache *cachep)
{
	int i;
	struct kmem_cache_node *n;
	int rc = __cache_shrink(cachep);

	if (rc)
		return rc;

	for_each_online_cpu(i)
	    kfree(cachep->array[i]);

	/* NUMA: free the node structures */
	for_each_online_node(i) {
		n = cachep->node[i];
		if (n) {
			kfree(n->shared);
			free_alien_cache(n->alien);
			kfree(n);
		}
	}
	return 0;
}

/*
 * Get the memory for a slab management obj.
 * For a slab cache when the slab descriptor is off-slab, slab descriptors
 * always come from malloc_sizes caches.  The slab descriptor cannot
 * come from the same cache which is getting created because,
 * when we are searching for an appropriate cache for these
 * descriptors in kmem_cache_create, we search through the malloc_sizes array.
 * If we are creating a malloc_sizes cache here it would not be visible to
 * kmem_find_general_cachep till the initialization is complete.
 * Hence we cannot have slabp_cache same as the original cache.
 */
static struct slab *alloc_slabmgmt(struct kmem_cache *cachep,
				   struct page *page, int colour_off,
				   gfp_t local_flags, int nodeid)
{
	struct slab *slabp;
	void *addr = page_address(page);

	if (OFF_SLAB(cachep)) {
		/* Slab management obj is off-slab. */
		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
					      local_flags, nodeid);
		/*
		 * If the first object in the slab is leaked (it's allocated
		 * but no one has a reference to it), we want to make sure
		 * kmemleak does not treat the ->s_mem pointer as a reference
		 * to the object. Otherwise we will not report the leak.
		 */
		kmemleak_scan_area(&slabp->list, sizeof(struct list_head),
				   local_flags);
		if (!slabp)
			return NULL;
	} else {
		slabp = addr + colour_off;
		colour_off += cachep->slab_size;
	}
	slabp->inuse = 0;
	slabp->s_mem = addr + colour_off;
	slabp->free = 0;
	return slabp;
}

static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
{
	return (kmem_bufctl_t *) (slabp + 1);
}
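
/*
 * On-slab layout implied by the helper above (descriptive sketch only):
 *
 *	+-------------+----------------------------+- ... -+---------+
 *	| struct slab | kmem_bufctl_t[cachep->num] |  pad  | objects |
 *	+-------------+----------------------------+- ... -+---------+
 *
 * The bufctl array starts immediately after struct slab and threads a
 * free list of object indices through the slab.
 */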

static void cache_init_objs(struct kmem_cache *cachep,
			    struct slab *slabp)
{
	int i;

	for (i = 0; i < cachep->num; i++) {
		void *objp = index_to_obj(cachep, slabp, i);
#if DEBUG
		/* need to poison the objs? */
		if (cachep->flags & SLAB_POISON)
			poison_obj(cachep, objp, POISON_FREE);
		if (cachep->flags & SLAB_STORE_USER)
			*dbg_userword(cachep, objp) = NULL;

		if (cachep->flags & SLAB_RED_ZONE) {
			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
		}
		/*
		 * Constructors are not allowed to allocate memory from the same
		 * cache which they are a constructor for.  Otherwise, deadlock.
		 * They must also be threaded.
		 */
		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
			cachep->ctor(objp + obj_offset(cachep));

		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "constructor overwrote the"
					   " end of an object");
			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "constructor overwrote the"
					   " start of an object");
		}
		if ((cachep->size % PAGE_SIZE) == 0 &&
			    OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
			kernel_map_pages(virt_to_page(objp),
					 cachep->size / PAGE_SIZE, 0);
#else
		if (cachep->ctor)
			cachep->ctor(objp);
#endif
		slab_bufctl(slabp)[i] = i + 1;
	}
	slab_bufctl(slabp)[i - 1] = BUFCTL_END;
}
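
/*
 * Freelist state after initialisation (illustrative): for a slab with
 * cachep->num == 4, the loop above leaves
 *
 *	slab_bufctl(slabp) == { 1, 2, 3, BUFCTL_END }
 *
 * i.e. each entry points at the next free object index and the chain
 * starts at slabp->free == 0 (set in alloc_slabmgmt()).
 */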

static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
{
	if (CONFIG_ZONE_DMA_FLAG) {
		if (flags & GFP_DMA)
			BUG_ON(!(cachep->allocflags & GFP_DMA));
		else
			BUG_ON(cachep->allocflags & GFP_DMA);
	}
}

static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
				int nodeid)
{
	void *objp = index_to_obj(cachep, slabp, slabp->free);
	kmem_bufctl_t next;

	slabp->inuse++;
	next = slab_bufctl(slabp)[slabp->free];
#if DEBUG
	slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
	WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
#endif
	slabp->free = next;

	return objp;
}

static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
				void *objp, int nodeid)
{
	unsigned int objnr = obj_to_index(cachep, slabp, objp);

#if DEBUG
	/* Verify that the slab belongs to the intended node */
	WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);

	if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
		printk(KERN_ERR "slab: double free detected in cache "
				"'%s', objp %p\n", cachep->name, objp);
		BUG();
	}
#endif
	slab_bufctl(slabp)[objnr] = slabp->free;
	slabp->free = objnr;
	slabp->inuse--;
}

/*
 * Map pages beginning at addr to the given cache and slab. This is required
 * for the slab allocator to be able to lookup the cache and slab of a
 * virtual address for kfree, ksize, and slab debugging.
 */
static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
			   struct page *page)
{
	page->slab_cache = cache;
	page->slab_page = slab;
}

/*
 * Grow (by 1) the number of slabs within a cache.  This is called by
 * kmem_cache_alloc() when there are no active objs left in a cache.
 */
static int cache_grow(struct kmem_cache *cachep,
		gfp_t flags, int nodeid, struct page *page)
{
	struct slab *slabp;
	size_t offset;
	gfp_t local_flags;
	struct kmem_cache_node *n;

	/*
	 * Be lazy and only check for valid flags here, keeping it out of the
	 * critical path in kmem_cache_alloc().
	 */
	BUG_ON(flags & GFP_SLAB_BUG_MASK);
	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);

	/* Take the node list lock to change the colour_next on this node */
	check_irq_off();
	n = cachep->node[nodeid];
	spin_lock(&n->list_lock);

	/* Get colour for the slab, and calc the next value. */
	offset = n->colour_next;
	n->colour_next++;
	if (n->colour_next >= cachep->colour)
		n->colour_next = 0;
	spin_unlock(&n->list_lock);

	offset *= cachep->colour_off;

	if (local_flags & __GFP_WAIT)
		local_irq_enable();

	/*
	 * The test for missing atomic flag is performed here, rather than
	 * the more obvious place, simply to reduce the critical path length
	 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
	 * will eventually be caught here (where it matters).
	 */
	kmem_flagcheck(cachep, flags);

	/*
	 * Get mem for the objs.  Attempt to allocate a physical page from
	 * 'nodeid'.
	 */
	if (!page)
		page = kmem_getpages(cachep, local_flags, nodeid);
	if (!page)
		goto failed;

	/* Get slab management. */
	slabp = alloc_slabmgmt(cachep, page, offset,
			local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
	if (!slabp)
		goto opps1;

	slab_map_pages(cachep, slabp, page);

	cache_init_objs(cachep, slabp);

	if (local_flags & __GFP_WAIT)
		local_irq_disable();
	check_irq_off();
	spin_lock(&n->list_lock);

	/* Make slab active. */
	list_add_tail(&slabp->list, &(n->slabs_free));
	STATS_INC_GROWN(cachep);
	n->free_objects += cachep->num;
	spin_unlock(&n->list_lock);
	return 1;
opps1:
	kmem_freepages(cachep, page);
failed:
	if (local_flags & __GFP_WAIT)
		local_irq_disable();
	return 0;
}
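
/*
 * Colouring example (illustrative numbers): with colour_off == 64 and
 * cachep->colour == 3, successive slabs from cache_grow() place their
 * objects at offsets 0, 64 and 128 within the page run, then wrap back
 * to 0, spreading identically-indexed objects across cache lines.
 */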

#if DEBUG

/*
 * Perform extra freeing checks:
 * - detect bad pointers.
 * - POISON/RED_ZONE checking
 */
static void kfree_debugcheck(const void *objp)
{
	if (!virt_addr_valid(objp)) {
		printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
		       (unsigned long)objp);
		BUG();
	}
}

static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
{
	unsigned long long redzone1, redzone2;

	redzone1 = *dbg_redzone1(cache, obj);
	redzone2 = *dbg_redzone2(cache, obj);

	/*
	 * Redzone is ok.
	 */
	if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
		return;

	if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
		slab_error(cache, "double free detected");
	else
		slab_error(cache, "memory outside object was overwritten");

	printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
			obj, redzone1, redzone2);

2840
static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2841
				   unsigned long caller)
L
Linus Torvalds 已提交
2842 2843 2844 2845
{
	unsigned int objnr;
	struct slab *slabp;

2846 2847
	BUG_ON(virt_to_cache(objp) != cachep);

2848
	objp -= obj_offset(cachep);
L
Linus Torvalds 已提交
2849
	kfree_debugcheck(objp);
2850
	slabp = virt_to_slab(objp);
L
Linus Torvalds 已提交
2851 2852

	if (cachep->flags & SLAB_RED_ZONE) {
2853
		verify_redzone_free(cachep, objp);
L
Linus Torvalds 已提交
2854 2855 2856 2857
		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
	}
	if (cachep->flags & SLAB_STORE_USER)
2858
		*dbg_userword(cachep, objp) = (void *)caller;
L
Linus Torvalds 已提交
2859

2860
	objnr = obj_to_index(cachep, slabp, objp);
L
Linus Torvalds 已提交
2861 2862

	BUG_ON(objnr >= cachep->num);
2863
	BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
L
Linus Torvalds 已提交
2864

2865 2866 2867
#ifdef CONFIG_DEBUG_SLAB_LEAK
	slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
#endif
L
Linus Torvalds 已提交
2868 2869
	if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
2870
		if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
2871
			store_stackinfo(cachep, objp, caller);
P
Pekka Enberg 已提交
2872
			kernel_map_pages(virt_to_page(objp),
2873
					 cachep->size / PAGE_SIZE, 0);
L
Linus Torvalds 已提交
2874 2875 2876 2877 2878 2879 2880 2881 2882 2883
		} else {
			poison_obj(cachep, objp, POISON_FREE);
		}
#else
		poison_obj(cachep, objp, POISON_FREE);
#endif
	}
	return objp;
}

2884
static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
L
Linus Torvalds 已提交
2885 2886 2887
{
	kmem_bufctl_t i;
	int entries = 0;
P
Pekka Enberg 已提交
2888

L
Linus Torvalds 已提交
2889 2890 2891 2892 2893 2894 2895
	/* Check slab's freelist to see if this obj is there. */
	for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
		entries++;
		if (entries > cachep->num || i >= cachep->num)
			goto bad;
	}
	if (entries != cachep->num - slabp->inuse) {
A
Andrew Morton 已提交
2896 2897
bad:
		printk(KERN_ERR "slab: Internal list corruption detected in "
2898 2899 2900
			"cache '%s'(%d), slabp %p(%d). Tainted(%s). Hexdump:\n",
			cachep->name, cachep->num, slabp, slabp->inuse,
			print_tainted());
2901 2902 2903
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, slabp,
			sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t),
			1);
L
Linus Torvalds 已提交
2904 2905 2906 2907 2908 2909 2910 2911 2912
		BUG();
	}
}
#else
#define kfree_debugcheck(x) do { } while(0)
#define cache_free_debugcheck(x,objp,z) (objp)
#define check_slabp(x,y) do { } while(0)
#endif

static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
							bool force_refill)
{
	int batchcount;
	struct kmem_cache_node *n;
	struct array_cache *ac;
	int node;

	check_irq_off();
	node = numa_mem_id();
	if (unlikely(force_refill))
		goto force_grow;
retry:
	ac = cpu_cache_get(cachep);
	batchcount = ac->batchcount;
	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
		/*
		 * If there was little recent activity on this cache, then
		 * perform only a partial refill.  Otherwise we could generate
		 * refill bouncing.
		 */
		batchcount = BATCHREFILL_LIMIT;
	}
	n = cachep->node[node];

	BUG_ON(ac->avail > 0 || !n);
	spin_lock(&n->list_lock);

	/* See if we can refill from the shared array */
	if (n->shared && transfer_objects(ac, n->shared, batchcount)) {
		n->shared->touched = 1;
		goto alloc_done;
	}

	while (batchcount > 0) {
		struct list_head *entry;
		struct slab *slabp;
		/* Get slab alloc is to come from. */
		entry = n->slabs_partial.next;
		if (entry == &n->slabs_partial) {
			n->free_touched = 1;
			entry = n->slabs_free.next;
			if (entry == &n->slabs_free)
				goto must_grow;
		}

		slabp = list_entry(entry, struct slab, list);
		check_slabp(cachep, slabp);
		check_spinlock_acquired(cachep);

		/*
		 * The slab was either on partial or free list so
		 * there must be at least one object available for
		 * allocation.
		 */
		BUG_ON(slabp->inuse >= cachep->num);

		while (slabp->inuse < cachep->num && batchcount--) {
			STATS_INC_ALLOCED(cachep);
			STATS_INC_ACTIVE(cachep);
			STATS_SET_HIGH(cachep);

			ac_put_obj(cachep, ac, slab_get_obj(cachep, slabp,
									node));
		}
		check_slabp(cachep, slabp);

		/* move slabp to correct slabp list: */
		list_del(&slabp->list);
		if (slabp->free == BUFCTL_END)
			list_add(&slabp->list, &n->slabs_full);
		else
			list_add(&slabp->list, &n->slabs_partial);
	}

must_grow:
	n->free_objects -= ac->avail;
alloc_done:
	spin_unlock(&n->list_lock);

	if (unlikely(!ac->avail)) {
		int x;
force_grow:
		x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);

		/* cache_grow can reenable interrupts, then ac could change. */
		ac = cpu_cache_get(cachep);
		node = numa_mem_id();

		/* no objects in sight? abort */
		if (!x && (ac->avail == 0 || force_refill))
			return NULL;

		if (!ac->avail)		/* objects refilled by interrupt? */
			goto retry;
	}
	ac->touched = 1;

	return ac_get_obj(cachep, ac, flags, force_refill);
}
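
/*
 * Summary of the refill order implemented above (descriptive comment,
 * not original source): the per-cpu array is refilled first from the
 * node's shared array, then from partial slabs, then from free slabs;
 * only when all of those are exhausted does cache_grow() ask the page
 * allocator for a new slab.
 */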

static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
						gfp_t flags)
{
	might_sleep_if(flags & __GFP_WAIT);
#if DEBUG
	kmem_flagcheck(cachep, flags);
#endif
}

#if DEBUG
static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
				gfp_t flags, void *objp, unsigned long caller)
{
	if (!objp)
		return objp;
	if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
		if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
			kernel_map_pages(virt_to_page(objp),
					 cachep->size / PAGE_SIZE, 1);
		else
			check_poison_obj(cachep, objp);
#else
		check_poison_obj(cachep, objp);
#endif
		poison_obj(cachep, objp, POISON_INUSE);
	}
	if (cachep->flags & SLAB_STORE_USER)
		*dbg_userword(cachep, objp) = (void *)caller;

	if (cachep->flags & SLAB_RED_ZONE) {
		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
			slab_error(cachep, "double free, or memory outside"
						" object was overwritten");
			printk(KERN_ERR
				"%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
				objp, *dbg_redzone1(cachep, objp),
				*dbg_redzone2(cachep, objp));
		}
		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
	}
#ifdef CONFIG_DEBUG_SLAB_LEAK
	{
		struct slab *slabp;
		unsigned objnr;

		slabp = virt_to_slab(objp);
		objnr = (unsigned)(objp - slabp->s_mem) / cachep->size;
		slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
	}
#endif
	objp += obj_offset(cachep);
	if (cachep->ctor && cachep->flags & SLAB_POISON)
		cachep->ctor(objp);
	if (ARCH_SLAB_MINALIGN &&
	    ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
		       objp, (int)ARCH_SLAB_MINALIGN);
	}
	return objp;
}
#else
#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
#endif

static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
{
	if (cachep == kmem_cache)
		return false;

	return should_failslab(cachep->object_size, flags, cachep->flags);
}

static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	void *objp;
	struct array_cache *ac;
	bool force_refill = false;

	check_irq_off();

	ac = cpu_cache_get(cachep);
	if (likely(ac->avail)) {
		ac->touched = 1;
		objp = ac_get_obj(cachep, ac, flags, false);

		/*
		 * Allow for the possibility all avail objects are not allowed
		 * by the current flags
		 */
		if (objp) {
			STATS_INC_ALLOCHIT(cachep);
			goto out;
		}
		force_refill = true;
	}

	STATS_INC_ALLOCMISS(cachep);
	objp = cache_alloc_refill(cachep, flags, force_refill);
	/*
	 * the 'ac' may be updated by cache_alloc_refill(),
	 * and kmemleak_erase() requires its correct value.
	 */
	ac = cpu_cache_get(cachep);

out:
	/*
	 * To avoid a false negative, if an object that is in one of the
	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
	 * treat the array pointers as a reference to the object.
	 */
	if (objp)
		kmemleak_erase(&ac->entry[ac->avail]);
	return objp;
}

#ifdef CONFIG_NUMA
/*
 * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
 *
 * If we are in_interrupt, then process context, including cpusets and
 * mempolicy, may not apply and should not be used for allocation policy.
 */
static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	int nid_alloc, nid_here;

	if (in_interrupt() || (flags & __GFP_THISNODE))
		return NULL;
	nid_alloc = nid_here = numa_mem_id();
	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
		nid_alloc = cpuset_slab_spread_node();
	else if (current->mempolicy)
		nid_alloc = slab_node();
	if (nid_alloc != nid_here)
		return ____cache_alloc_node(cachep, flags, nid_alloc);
	return NULL;
}

/*
 * Fallback function if there was no memory available and no objects on a
 * certain node and fallback is permitted. First we scan all the
 * available nodes for available objects. If that fails then we
 * perform an allocation without specifying a node. This allows the page
 * allocator to do its reclaim / fallback magic. We then insert the
 * slab into the proper nodelist and then allocate from it.
 */
static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
{
	struct zonelist *zonelist;
	gfp_t local_flags;
	struct zoneref *z;
	struct zone *zone;
	enum zone_type high_zoneidx = gfp_zone(flags);
	void *obj = NULL;
	int nid;
	unsigned int cpuset_mems_cookie;

	if (flags & __GFP_THISNODE)
		return NULL;

	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);

retry_cpuset:
	cpuset_mems_cookie = get_mems_allowed();
	zonelist = node_zonelist(slab_node(), flags);

retry:
	/*
	 * Look through allowed nodes for objects available
	 * from existing per node queues.
	 */
	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
		nid = zone_to_nid(zone);

		if (cpuset_zone_allowed_hardwall(zone, flags) &&
			cache->node[nid] &&
			cache->node[nid]->free_objects) {
				obj = ____cache_alloc_node(cache,
					flags | GFP_THISNODE, nid);
				if (obj)
					break;
		}
	}

	if (!obj) {
		/*
		 * This allocation will be performed within the constraints
		 * of the current cpuset / memory policy requirements.
		 * We may trigger various forms of reclaim on the allowed
		 * set and go into memory reserves if necessary.
		 */
		struct page *page;

		if (local_flags & __GFP_WAIT)
			local_irq_enable();
		kmem_flagcheck(cache, flags);
		page = kmem_getpages(cache, local_flags, numa_mem_id());
		if (local_flags & __GFP_WAIT)
			local_irq_disable();
		if (page) {
			/*
			 * Insert into the appropriate per node queues
			 */
			nid = page_to_nid(page);
			if (cache_grow(cache, flags, nid, page)) {
				obj = ____cache_alloc_node(cache,
					flags | GFP_THISNODE, nid);
				if (!obj)
					/*
					 * Another processor may allocate the
					 * objects in the slab since we are
					 * not holding any locks.
					 */
					goto retry;
			} else {
				/* cache_grow already freed obj */
				obj = NULL;
			}
		}
	}

	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !obj))
		goto retry_cpuset;
	return obj;
}

/*
 * An interface to enable slab creation on nodeid
 */
static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
				int nodeid)
{
	struct list_head *entry;
	struct slab *slabp;
	struct kmem_cache_node *n;
	void *obj;
	int x;

	VM_BUG_ON(nodeid > num_online_nodes());
	n = cachep->node[nodeid];
	BUG_ON(!n);

retry:
	check_irq_off();
	spin_lock(&n->list_lock);
	entry = n->slabs_partial.next;
	if (entry == &n->slabs_partial) {
		n->free_touched = 1;
		entry = n->slabs_free.next;
		if (entry == &n->slabs_free)
			goto must_grow;
	}

	slabp = list_entry(entry, struct slab, list);
	check_spinlock_acquired_node(cachep, nodeid);
	check_slabp(cachep, slabp);

	STATS_INC_NODEALLOCS(cachep);
	STATS_INC_ACTIVE(cachep);
	STATS_SET_HIGH(cachep);

	BUG_ON(slabp->inuse == cachep->num);

	obj = slab_get_obj(cachep, slabp, nodeid);
	check_slabp(cachep, slabp);
	n->free_objects--;
	/* move slabp to correct slabp list: */
	list_del(&slabp->list);

	if (slabp->free == BUFCTL_END)
		list_add(&slabp->list, &n->slabs_full);
	else
		list_add(&slabp->list, &n->slabs_partial);

	spin_unlock(&n->list_lock);
	goto done;

must_grow:
	spin_unlock(&n->list_lock);
	x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
	if (x)
		goto retry;

	return fallback_alloc(cachep, flags);

done:
	return obj;
}

static __always_inline void *
slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
		   unsigned long caller)
{
	unsigned long save_flags;
	void *ptr;
	int slab_node = numa_mem_id();

	flags &= gfp_allowed_mask;

	lockdep_trace_alloc(flags);

	if (slab_should_failslab(cachep, flags))
		return NULL;

	cachep = memcg_kmem_get_cache(cachep, flags);

	cache_alloc_debugcheck_before(cachep, flags);
	local_irq_save(save_flags);

	if (nodeid == NUMA_NO_NODE)
		nodeid = slab_node;

	if (unlikely(!cachep->node[nodeid])) {
		/* Node not bootstrapped yet */
		ptr = fallback_alloc(cachep, flags);
		goto out;
	}

	if (nodeid == slab_node) {
		/*
		 * Use the locally cached objects if possible.
		 * However ____cache_alloc does not allow fallback
		 * to other nodes. It may fail while we still have
		 * objects on other nodes available.
		 */
		ptr = ____cache_alloc(cachep, flags);
		if (ptr)
			goto out;
	}
	/* ___cache_alloc_node can fall back to other nodes */
	ptr = ____cache_alloc_node(cachep, flags, nodeid);
  out:
	local_irq_restore(save_flags);
	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
	kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags,
				 flags);

	if (likely(ptr))
		kmemcheck_slab_alloc(cachep, flags, ptr, cachep->object_size);

	if (unlikely((flags & __GFP_ZERO) && ptr))
		memset(ptr, 0, cachep->object_size);

	return ptr;
}

static __always_inline void *
__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
{
	void *objp;

	if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
		objp = alternate_node_alloc(cache, flags);
		if (objp)
			goto out;
	}
	objp = ____cache_alloc(cache, flags);

	/*
	 * We may just have run out of memory on the local node.
	 * ____cache_alloc_node() knows how to locate memory on other nodes
	 */
	if (!objp)
		objp = ____cache_alloc_node(cache, flags, numa_mem_id());

  out:
	return objp;
}
#else

static __always_inline void *
__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	return ____cache_alloc(cachep, flags);
}

#endif /* CONFIG_NUMA */

static __always_inline void *
slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
{
	unsigned long save_flags;
	void *objp;

	flags &= gfp_allowed_mask;

	lockdep_trace_alloc(flags);

	if (slab_should_failslab(cachep, flags))
		return NULL;

	cachep = memcg_kmem_get_cache(cachep, flags);

	cache_alloc_debugcheck_before(cachep, flags);
	local_irq_save(save_flags);
	objp = __do_cache_alloc(cachep, flags);
	local_irq_restore(save_flags);
	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
	kmemleak_alloc_recursive(objp, cachep->object_size, 1, cachep->flags,
				 flags);
	prefetchw(objp);

	if (likely(objp))
		kmemcheck_slab_alloc(cachep, flags, objp, cachep->object_size);

	if (unlikely((flags & __GFP_ZERO) && objp))
		memset(objp, 0, cachep->object_size);

	return objp;
}

/*
 * Caller needs to acquire the correct node's list_lock.
 */
static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
		       int node)
{
	int i;
	struct kmem_cache_node *n;

	for (i = 0; i < nr_objects; i++) {
		void *objp;
		struct slab *slabp;

		clear_obj_pfmemalloc(&objpp[i]);
		objp = objpp[i];

		slabp = virt_to_slab(objp);
		n = cachep->node[node];
		list_del(&slabp->list);
		check_spinlock_acquired_node(cachep, node);
		check_slabp(cachep, slabp);
		slab_put_obj(cachep, slabp, objp, node);
		STATS_DEC_ACTIVE(cachep);
		n->free_objects++;
		check_slabp(cachep, slabp);

		/* fixup slab chains */
		if (slabp->inuse == 0) {
			if (n->free_objects > n->free_limit) {
				n->free_objects -= cachep->num;
				/* No need to drop any previously held
				 * lock here, even if we have an off-slab slab
				 * descriptor it is guaranteed to come from
				 * a different cache, refer to comments before
				 * alloc_slabmgmt.
				 */
				slab_destroy(cachep, slabp);
			} else {
				list_add(&slabp->list, &n->slabs_free);
			}
		} else {
			/* Unconditionally move a slab to the end of the
			 * partial list on free - maximum time for the
			 * other objects to be freed, too.
			 */
			list_add_tail(&slabp->list, &n->slabs_partial);
		}
	}
}

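/*
 * Flush a per-cpu array_cache that has overflowed: move up to
 * ac->batchcount objects into the node's shared array if there is room,
 * and hand the remainder back to the slab lists via free_block().
 */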
static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
{
	int batchcount;
	struct kmem_cache_node *n;
	int node = numa_mem_id();

	batchcount = ac->batchcount;
#if DEBUG
	BUG_ON(!batchcount || batchcount > ac->avail);
#endif
	check_irq_off();
	n = cachep->node[node];
	spin_lock(&n->list_lock);
	if (n->shared) {
		struct array_cache *shared_array = n->shared;
		int max = shared_array->limit - shared_array->avail;
		if (max) {
			if (batchcount > max)
				batchcount = max;
			memcpy(&(shared_array->entry[shared_array->avail]),
			       ac->entry, sizeof(void *) * batchcount);
			shared_array->avail += batchcount;
			goto free_done;
		}
	}

	free_block(cachep, ac->entry, batchcount, node);
free_done:
#if STATS
	{
		int i = 0;
		struct list_head *p;

		p = n->slabs_free.next;
		while (p != &(n->slabs_free)) {
			struct slab *slabp;

			slabp = list_entry(p, struct slab, list);
			BUG_ON(slabp->inuse);

			i++;
			p = p->next;
		}
		STATS_SET_FREEABLE(cachep, i);
	}
#endif
	spin_unlock(&n->list_lock);
	ac->avail -= batchcount;
	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
}

/*
 * Release an obj back to its cache. If the obj has a constructed state, it must
 * be in this state _before_ it is released.  Called with disabled ints.
 */
static inline void __cache_free(struct kmem_cache *cachep, void *objp,
				unsigned long caller)
{
	struct array_cache *ac = cpu_cache_get(cachep);

	check_irq_off();
	kmemleak_free_recursive(objp, cachep->flags);
	objp = cache_free_debugcheck(cachep, objp, caller);

	kmemcheck_slab_free(cachep, objp, cachep->object_size);

	/*
	 * Skip calling cache_free_alien() when the platform is not numa.
	 * This will avoid cache misses that happen while accessing slabp (which
	 * is per page memory reference) to get nodeid. Instead use a global
	 * variable to skip the call, which is most likely to be present in
	 * the cache.
	 */
	if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
		return;

	if (likely(ac->avail < ac->limit)) {
		STATS_INC_FREEHIT(cachep);
	} else {
		STATS_INC_FREEMISS(cachep);
		cache_flusharray(cachep, ac);
	}

	ac_put_obj(cachep, ac, objp);
}

/**
 * kmem_cache_alloc - Allocate an object
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 *
 * Allocate an object from this cache.  The flags are only relevant
 * if the cache has no available objects.
 */
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	void *ret = slab_alloc(cachep, flags, _RET_IP_);

	trace_kmem_cache_alloc(_RET_IP_, ret,
			       cachep->object_size, cachep->size, flags);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc);
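
/*
 * Example usage (illustrative sketch only; the "foo" cache and structure
 * are made up):
 *
 *	struct foo { int a; };
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	if (f) {
 *		...
 *		kmem_cache_free(foo_cache, f);
 *	}
 */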

#ifdef CONFIG_TRACING
void *
kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
	void *ret;

	ret = slab_alloc(cachep, flags, _RET_IP_);

	trace_kmalloc(_RET_IP_, ret,
		      size, cachep->size, flags);
	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_trace);
#endif

#ifdef CONFIG_NUMA
/**
 * kmem_cache_alloc_node - Allocate an object on the specified node
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 * @nodeid: node number of the target node.
 *
 * Identical to kmem_cache_alloc but it will allocate memory on the given
 * node, which can improve the performance for cpu bound structures.
 *
 * Fallback to other node is possible if __GFP_THISNODE is not set.
 */
void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);

	trace_kmem_cache_alloc_node(_RET_IP_, ret,
				    cachep->object_size, cachep->size,
				    flags, nodeid);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
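
/*
 * Example (illustrative sketch only, with a made-up "bar" cache): keep a
 * per-device structure on the node closest to its device:
 *
 *	struct bar *b = kmem_cache_alloc_node(bar_cache, GFP_KERNEL,
 *					      dev_to_node(dev));
 */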

#ifdef CONFIG_TRACING
void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
				  gfp_t flags,
				  int nodeid,
				  size_t size)
{
	void *ret;

	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);

	trace_kmalloc_node(_RET_IP_, ret,
			   size, cachep->size,
			   flags, nodeid);
	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
#endif

static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
{
	struct kmem_cache *cachep;

	cachep = kmalloc_slab(size, flags);
	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
		return cachep;
	return kmem_cache_alloc_node_trace(cachep, flags, node, size);
}

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __do_kmalloc_node(size, flags, node, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc_node);

void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
		int node, unsigned long caller)
{
	return __do_kmalloc_node(size, flags, node, caller);
}
EXPORT_SYMBOL(__kmalloc_node_track_caller);
#else
void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __do_kmalloc_node(size, flags, node, 0);
}
EXPORT_SYMBOL(__kmalloc_node);
#endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */
#endif /* CONFIG_NUMA */

/**
 * __do_kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @caller: function caller for debug tracking of the caller
 */
static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
					  unsigned long caller)
{
	struct kmem_cache *cachep;
	void *ret;

	/* If you want to save a few bytes of .text space: replace
	 * __ with kmem_.
	 * Then kmalloc uses the uninlined functions instead of the inline
	 * functions.
	 */
	cachep = kmalloc_slab(size, flags);
	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
		return cachep;
	ret = slab_alloc(cachep, flags, caller);

	trace_kmalloc(caller, ret,
		      size, cachep->size, flags);

	return ret;
}


#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
void *__kmalloc(size_t size, gfp_t flags)
{
	return __do_kmalloc(size, flags, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc);

void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
{
	return __do_kmalloc(size, flags, caller);
}
EXPORT_SYMBOL(__kmalloc_track_caller);

#else
void *__kmalloc(size_t size, gfp_t flags)
{
	return __do_kmalloc(size, flags, 0);
}
EXPORT_SYMBOL(__kmalloc);
#endif

/**
 * kmem_cache_free - Deallocate an object
 * @cachep: The cache the allocation was from.
 * @objp: The previously allocated object.
 *
 * Free an object which was previously allocated from this
 * cache.
 */
void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	unsigned long flags;

	cachep = cache_from_obj(cachep, objp);
	if (!cachep)
		return;

	local_irq_save(flags);
	debug_check_no_locks_freed(objp, cachep->object_size);
	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
		debug_check_no_obj_freed(objp, cachep->object_size);
	__cache_free(cachep, objp, _RET_IP_);
	local_irq_restore(flags);

	trace_kmem_cache_free(_RET_IP_, objp);
}
EXPORT_SYMBOL(kmem_cache_free);

/**
 * kfree - free previously allocated memory
 * @objp: pointer returned by kmalloc.
 *
 * If @objp is NULL, no operation is performed.
 *
 * Don't free memory not originally allocated by kmalloc()
 * or you will run into trouble.
 */
void kfree(const void *objp)
{
	struct kmem_cache *c;
	unsigned long flags;

	trace_kfree(_RET_IP_, objp);

	if (unlikely(ZERO_OR_NULL_PTR(objp)))
		return;
	local_irq_save(flags);
	kfree_debugcheck(objp);
	c = virt_to_cache(objp);
	debug_check_no_locks_freed(objp, c->object_size);
	debug_check_no_obj_freed(objp, c->object_size);
	__cache_free(c, (void *)objp, _RET_IP_);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(kfree);
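
/*
 * Example (illustrative sketch only) of the kmalloc()/kfree() pairing
 * that ends up in __do_kmalloc() and kfree() above:
 *
 *	char *buf = kmalloc(64, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kfree(buf);
 */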

/*
 * This initializes kmem_cache_node or resizes various caches for all nodes.
 */
static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
{
	int node;
	struct kmem_cache_node *n;
	struct array_cache *new_shared;
	struct array_cache **new_alien = NULL;

	for_each_online_node(node) {

		if (use_alien_caches) {
			new_alien = alloc_alien_cache(node, cachep->limit, gfp);
			if (!new_alien)
				goto fail;
		}

		new_shared = NULL;
		if (cachep->shared) {
			new_shared = alloc_arraycache(node,
				cachep->shared*cachep->batchcount,
					0xbaadf00d, gfp);
			if (!new_shared) {
				free_alien_cache(new_alien);
				goto fail;
			}
		}

		n = cachep->node[node];
		if (n) {
			struct array_cache *shared = n->shared;

			spin_lock_irq(&n->list_lock);

			if (shared)
				free_block(cachep, shared->entry,
						shared->avail, node);

			n->shared = new_shared;
			if (!n->alien) {
				n->alien = new_alien;
				new_alien = NULL;
			}
			n->free_limit = (1 + nr_cpus_node(node)) *
					cachep->batchcount + cachep->num;
			spin_unlock_irq(&n->list_lock);
			kfree(shared);
			free_alien_cache(new_alien);
			continue;
		}
		n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
		if (!n) {
			free_alien_cache(new_alien);
			kfree(new_shared);
			goto fail;
		}

		kmem_cache_node_init(n);
		n->next_reap = jiffies + REAPTIMEOUT_LIST3 +
				((unsigned long)cachep) % REAPTIMEOUT_LIST3;
		n->shared = new_shared;
		n->alien = new_alien;
		n->free_limit = (1 + nr_cpus_node(node)) *
					cachep->batchcount + cachep->num;
		cachep->node[node] = n;
	}
	return 0;

fail:
	if (!cachep->list.next) {
		/* Cache is not active yet. Roll back what we did */
		node--;
		while (node >= 0) {
			if (cachep->node[node]) {
				n = cachep->node[node];

				kfree(n->shared);
				free_alien_cache(n->alien);
				kfree(n);
				cachep->node[node] = NULL;
			}
			node--;
		}
	}
	return -ENOMEM;
}

struct ccupdate_struct {
	struct kmem_cache *cachep;
	struct array_cache *new[0];
};

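/*
 * Runs on each cpu via on_each_cpu(): install the newly allocated
 * array_cache for this cpu and pass the old one back through new->new[]
 * so that the caller can drain and free it.
 */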
static void do_ccupdate_local(void *info)
{
	struct ccupdate_struct *new = info;
	struct array_cache *old;

	check_irq_off();
	old = cpu_cache_get(new->cachep);

	new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
	new->new[smp_processor_id()] = old;
}

/* Always called with the slab_mutex held */
static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
				int batchcount, int shared, gfp_t gfp)
{
	struct ccupdate_struct *new;
	int i;

	new = kzalloc(sizeof(*new) + nr_cpu_ids * sizeof(struct array_cache *),
		      gfp);
	if (!new)
		return -ENOMEM;

	for_each_online_cpu(i) {
		new->new[i] = alloc_arraycache(cpu_to_mem(i), limit,
						batchcount, gfp);
		if (!new->new[i]) {
			for (i--; i >= 0; i--)
				kfree(new->new[i]);
			kfree(new);
			return -ENOMEM;
		}
	}
	new->cachep = cachep;

	on_each_cpu(do_ccupdate_local, (void *)new, 1);

	check_irq_on();
	cachep->batchcount = batchcount;
	cachep->limit = limit;
	cachep->shared = shared;

	for_each_online_cpu(i) {
		struct array_cache *ccold = new->new[i];
		if (!ccold)
			continue;
		spin_lock_irq(&cachep->node[cpu_to_mem(i)]->list_lock);
		free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i));
		spin_unlock_irq(&cachep->node[cpu_to_mem(i)]->list_lock);
		kfree(ccold);
	}
	kfree(new);
	return alloc_kmemlist(cachep, gfp);
}

static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
				int batchcount, int shared, gfp_t gfp)
{
	int ret;
	struct kmem_cache *c = NULL;
	int i = 0;

	ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);

	if (slab_state < FULL)
		return ret;

	if ((ret < 0) || !is_root_cache(cachep))
		return ret;

	VM_BUG_ON(!mutex_is_locked(&slab_mutex));
	for_each_memcg_cache_index(i) {
		c = cache_from_memcg(cachep, i);
		if (c)
			/* return value determined by the parent cache only */
			__do_tune_cpucache(c, limit, batchcount, shared, gfp);
	}

	return ret;
}

/* Called with slab_mutex held always */
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
{
	int err;
	int limit = 0;
	int shared = 0;
	int batchcount = 0;

	if (!is_root_cache(cachep)) {
		struct kmem_cache *root = memcg_root_cache(cachep);
		limit = root->limit;
		shared = root->shared;
		batchcount = root->batchcount;
	}

	if (limit && shared && batchcount)
		goto skip_setup;
	/*
	 * The head array serves three purposes:
	 * - create a LIFO ordering, i.e. return objects that are cache-warm
	 * - reduce the number of spinlock operations.
	 * - reduce the number of linked list operations on the slab and
	 *   bufctl chains: array operations are cheaper.
	 * The numbers are guessed, we should auto-tune as described by
	 * Bonwick.
	 */
	if (cachep->size > 131072)
		limit = 1;
	else if (cachep->size > PAGE_SIZE)
		limit = 8;
	else if (cachep->size > 1024)
		limit = 24;
	else if (cachep->size > 256)
		limit = 54;
	else
		limit = 120;

	/*
	 * CPU bound tasks (e.g. network routing) can exhibit cpu bound
	 * allocation behaviour: Most allocs on one cpu, most free operations
	 * on another cpu. For these cases, an efficient object passing between
	 * cpus is necessary. This is provided by a shared array. The array
	 * replaces Bonwick's magazine layer.
	 * On uniprocessor, it's functionally equivalent (but less efficient)
	 * to a larger limit. Thus disabled by default.
	 */
	shared = 0;
	if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
		shared = 8;

#if DEBUG
	/*
	 * With debugging enabled, a large batchcount leads to excessively
	 * long periods with disabled local interrupts. Limit the batchcount.
	 */
	if (limit > 32)
		limit = 32;
#endif
	batchcount = (limit + 1) / 2;
skip_setup:
	err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
	if (err)
		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
		       cachep->name, -err);
	return err;
}
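
/*
 * Worked example for the heuristic above (illustrative only, ignoring
 * the DEBUG clamp): on an SMP machine, a cache of 512-byte objects gets
 * limit = 54 (512 is above 256 but not above 1024), batchcount =
 * (54 + 1) / 2 = 27 and shared = 8, since 512 <= PAGE_SIZE.
 */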

/*
 * Drain an array if it contains any elements, taking the node lock only if
 * necessary. Note that the node listlock also protects the array_cache
 * if drain_array() is used on the shared array.
 */
static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
			 struct array_cache *ac, int force, int node)
{
	int tofree;

	if (!ac || !ac->avail)
		return;
	if (ac->touched && !force) {
		ac->touched = 0;
	} else {
		spin_lock_irq(&n->list_lock);
		if (ac->avail) {
			tofree = force ? ac->avail : (ac->limit + 4) / 5;
			if (tofree > ac->avail)
				tofree = (ac->avail + 1) / 2;
			free_block(cachep, ac->entry, tofree, node);
			ac->avail -= tofree;
			memmove(ac->entry, &(ac->entry[tofree]),
				sizeof(void *) * ac->avail);
		}
		spin_unlock_irq(&n->list_lock);
	}
}

/**
 * cache_reap - Reclaim memory from caches.
 * @w: work descriptor
 *
 * Called from workqueue/eventd every few seconds.
 * Purpose:
 * - clear the per-cpu caches for this CPU.
 * - return freeable pages to the main free memory pool.
 *
 * If we cannot acquire the cache chain mutex then just give up - we'll try
 * again on the next iteration.
 */
static void cache_reap(struct work_struct *w)
{
	struct kmem_cache *searchp;
	struct kmem_cache_node *n;
	int node = numa_mem_id();
	struct delayed_work *work = to_delayed_work(w);

	if (!mutex_trylock(&slab_mutex))
		/* Give up. Set up the next iteration. */
		goto out;

	list_for_each_entry(searchp, &slab_caches, list) {
		check_irq_on();

		/*
		 * We only take the node lock if absolutely necessary and we
		 * have established with reasonable certainty that
		 * we can do some work if the lock was obtained.
		 */
		n = searchp->node[node];

		reap_alien(searchp, n);

		drain_array(searchp, n, cpu_cache_get(searchp), 0, node);

		/*
		 * These are racy checks but it does not matter
		 * if we skip one check or scan twice.
		 */
		if (time_after(n->next_reap, jiffies))
			goto next;

		n->next_reap = jiffies + REAPTIMEOUT_LIST3;

		drain_array(searchp, n, n->shared, 0, node);

		if (n->free_touched)
			n->free_touched = 0;
		else {
			int freed;

			freed = drain_freelist(searchp, n, (n->free_limit +
				5 * searchp->num - 1) / (5 * searchp->num));
			STATS_ADD_REAPED(searchp, freed);
		}
next:
		cond_resched();
	}
	check_irq_on();
	mutex_unlock(&slab_mutex);
	next_reap_node();
out:
	/* Set up the next iteration */
	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
}

#ifdef CONFIG_SLABINFO
void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
{
	struct slab *slabp;
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs = 0;
	unsigned long num_slabs, free_objects = 0, shared_avail = 0;
	const char *name;
	char *error = NULL;
	int node;
	struct kmem_cache_node *n;

	active_objs = 0;
	num_slabs = 0;
	for_each_online_node(node) {
		n = cachep->node[node];
		if (!n)
			continue;

		check_irq_on();
		spin_lock_irq(&n->list_lock);

		list_for_each_entry(slabp, &n->slabs_full, list) {
			if (slabp->inuse != cachep->num && !error)
				error = "slabs_full accounting error";
			active_objs += cachep->num;
			active_slabs++;
		}
		list_for_each_entry(slabp, &n->slabs_partial, list) {
			if (slabp->inuse == cachep->num && !error)
				error = "slabs_partial inuse accounting error";
			if (!slabp->inuse && !error)
				error = "slabs_partial/inuse accounting error";
			active_objs += slabp->inuse;
			active_slabs++;
		}
		list_for_each_entry(slabp, &n->slabs_free, list) {
			if (slabp->inuse && !error)
				error = "slabs_free/inuse accounting error";
			num_slabs++;
		}
		free_objects += n->free_objects;
		if (n->shared)
			shared_avail += n->shared->avail;

		spin_unlock_irq(&n->list_lock);
	}
	num_slabs += active_slabs;
	num_objs = num_slabs * cachep->num;
	if (num_objs - active_objs != free_objects && !error)
		error = "free_objects accounting error";

	name = cachep->name;
	if (error)
		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);

	sinfo->active_objs = active_objs;
	sinfo->num_objs = num_objs;
	sinfo->active_slabs = active_slabs;
	sinfo->num_slabs = num_slabs;
	sinfo->shared_avail = shared_avail;
	sinfo->limit = cachep->limit;
	sinfo->batchcount = cachep->batchcount;
	sinfo->shared = cachep->shared;
	sinfo->objects_per_slab = cachep->num;
	sinfo->cache_order = cachep->gfporder;
}

void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
{
#if STATS
	{			/* node stats */
		unsigned long high = cachep->high_mark;
		unsigned long allocs = cachep->num_allocations;
		unsigned long grown = cachep->grown;
		unsigned long reaped = cachep->reaped;
		unsigned long errors = cachep->errors;
		unsigned long max_freeable = cachep->max_freeable;
		unsigned long node_allocs = cachep->node_allocs;
		unsigned long node_frees = cachep->node_frees;
		unsigned long overflows = cachep->node_overflow;

		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
			   "%4lu %4lu %4lu %4lu %4lu",
			   allocs, high, grown,
			   reaped, errors, max_freeable, node_allocs,
			   node_frees, overflows);
	}
	/* cpu stats */
	{
		unsigned long allochit = atomic_read(&cachep->allochit);
		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
		unsigned long freehit = atomic_read(&cachep->freehit);
		unsigned long freemiss = atomic_read(&cachep->freemiss);

		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
			   allochit, allocmiss, freehit, freemiss);
	}
#endif
}

#define MAX_SLABINFO_WRITE 128
/**
 * slabinfo_write - Tuning for the slab allocator
 * @file: unused
 * @buffer: user buffer
 * @count: data length
 * @ppos: unused
 */
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos)
{
	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
	int limit, batchcount, shared, res;
	struct kmem_cache *cachep;

	if (count > MAX_SLABINFO_WRITE)
		return -EINVAL;
	if (copy_from_user(&kbuf, buffer, count))
		return -EFAULT;
	kbuf[MAX_SLABINFO_WRITE] = '\0';

	tmp = strchr(kbuf, ' ');
	if (!tmp)
		return -EINVAL;
	*tmp = '\0';
	tmp++;
	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
		return -EINVAL;

	/* Find the cache in the chain of caches. */
	mutex_lock(&slab_mutex);
	res = -EINVAL;
	list_for_each_entry(cachep, &slab_caches, list) {
		if (!strcmp(cachep->name, kbuf)) {
			if (limit < 1 || batchcount < 1 ||
					batchcount > limit || shared < 0) {
				res = 0;
			} else {
				res = do_tune_cpucache(cachep, limit,
						       batchcount, shared,
						       GFP_KERNEL);
			}
			break;
		}
	}
	mutex_unlock(&slab_mutex);
	if (res >= 0)
		res = count;
	return res;
}
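
/*
 * Example (illustrative only): tune the per-cpu arrays of a cache from
 * userspace by writing "<name> <limit> <batchcount> <shared>":
 *
 *	echo "dentry 120 60 8" > /proc/slabinfo
 */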

#ifdef CONFIG_DEBUG_SLAB_LEAK

static void *leaks_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);
	return seq_list_start(&slab_caches, *pos);
}

/*
 * Record the caller address 'v' in the sorted table at 'n'.  n[0] holds
 * the table capacity and n[1] the number of entries; each entry is an
 * (address, count) pair kept sorted by address.  Returns 0 when the
 * table is full.
 */
static inline int add_caller(unsigned long *n, unsigned long v)
{
	unsigned long *p;
	int l;
	if (!v)
		return 1;
	l = n[1];
	p = n + 2;
	/* binary search for an existing entry */
	while (l) {
		int i = l/2;
		unsigned long *q = p + 2 * i;
		if (*q == v) {
			q[1]++;
			return 1;
		}
		if (*q > v) {
			l = i;
		} else {
			p = q + 2;
			l -= i + 1;
		}
	}
	if (++n[1] == n[0])
		return 0;
	/* shift the tail up and insert a new (address, count) pair at p */
	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
	p[0] = v;
	p[1] = 1;
	return 1;
}

static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
{
	void *p;
	int i;
	if (n[0] == n[1])
		return;
	for (i = 0, p = s->s_mem; i < c->num; i++, p += c->size) {
		if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
			continue;
		if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
			return;
	}
}

static void show_symbol(struct seq_file *m, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	unsigned long offset, size;
	char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];

	if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
		if (modname[0])
			seq_printf(m, " [%s]", modname);
		return;
	}
#endif
	seq_printf(m, "%p", (void *)address);
}

static int leaks_show(struct seq_file *m, void *p)
{
	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
	struct slab *slabp;
	struct kmem_cache_node *n;
	const char *name;
	unsigned long *x = m->private;
	int node;
	int i;

	if (!(cachep->flags & SLAB_STORE_USER))
		return 0;
	if (!(cachep->flags & SLAB_RED_ZONE))
		return 0;

	/* OK, we can do it */

	x[1] = 0;

	for_each_online_node(node) {
		n = cachep->node[node];
		if (!n)
			continue;

		check_irq_on();
		spin_lock_irq(&n->list_lock);

		list_for_each_entry(slabp, &n->slabs_full, list)
			handle_slab(x, cachep, slabp);
		list_for_each_entry(slabp, &n->slabs_partial, list)
			handle_slab(x, cachep, slabp);
		spin_unlock_irq(&n->list_lock);
	}
	name = cachep->name;
	if (x[0] == x[1]) {
		/* Increase the buffer size */
		mutex_unlock(&slab_mutex);
		m->private = kzalloc(x[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
		if (!m->private) {
			/* Too bad, we are really out */
			m->private = x;
			mutex_lock(&slab_mutex);
			return -ENOMEM;
		}
		*(unsigned long *)m->private = x[0] * 2;
		kfree(x);
		mutex_lock(&slab_mutex);
		/* Now make sure this entry will be retried */
		m->count = m->size;
		return 0;
	}
	for (i = 0; i < x[1]; i++) {
		seq_printf(m, "%s: %lu ", name, x[2*i+3]);
		show_symbol(m, x[2*i+2]);
		seq_putc(m, '\n');
	}

	return 0;
}

static const struct seq_operations slabstats_op = {
	.start = leaks_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = leaks_show,
};

static int slabstats_open(struct inode *inode, struct file *file)
{
	unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL);
	int ret = -ENOMEM;
	if (n) {
		ret = seq_open(file, &slabstats_op);
		if (!ret) {
			struct seq_file *m = file->private_data;
			*n = PAGE_SIZE / (2 * sizeof(unsigned long));
			m->private = n;
			n = NULL;
		}
		kfree(n);
	}
	return ret;
}

static const struct file_operations proc_slabstats_operations = {
	.open		= slabstats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif

static int __init slab_proc_init(void)
{
#ifdef CONFIG_DEBUG_SLAB_LEAK
	proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
#endif
	return 0;
}
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */

/**
 * ksize - get the actual amount of memory allocated for a given object
 * @objp: Pointer to the object
 *
 * kmalloc may internally round up allocations and return more memory
 * than requested. ksize() can be used to determine the actual amount of
 * memory allocated. The caller may use this additional memory, even though
 * a smaller amount of memory was initially specified with the kmalloc call.
 * The caller must guarantee that objp points to a valid object previously
 * allocated with either kmalloc() or kmem_cache_alloc(). The object
 * must not be freed during the duration of the call.
 */
size_t ksize(const void *objp)
{
	BUG_ON(!objp);
	if (unlikely(objp == ZERO_SIZE_PTR))
		return 0;

	return virt_to_cache(objp)->object_size;
}
EXPORT_SYMBOL(ksize);
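
/*
 * Example (illustrative sketch only): the usable size may exceed the
 * requested size because kmalloc rounds up to the next kmalloc cache:
 *
 *	char *buf = kmalloc(100, GFP_KERNEL);
 *	size_t usable = buf ? ksize(buf) : 0;	(e.g. 128 on common configs)
 */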