/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * NUMA support by Paul Mundt, 2007.
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is as little as 2 bytes, though typically most architectures
 * will require 4 bytes on 32-bit and 8 bytes on 64-bit.
 *
 * The slob heap is a set of linked lists of pages from alloc_pages(),
 * and within each page, there is a singly-linked list of free blocks
 * (slob_t). The heap is grown on demand. To reduce fragmentation,
 * heap pages are segregated into three lists, with objects less than
 * 256 bytes, objects less than 1024 bytes, and all other objects.
 *
 * Allocation from the heap involves first searching for a page with
 * sufficient free blocks (using a next-fit-like approach) followed by
 * a first-fit scan of the page. Deallocation inserts objects back
 * into the free list in address order, so this is effectively an
 * address-ordered first fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are prepended with a 4-byte header with the kmalloc size.
 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * alloc_pages() directly, allocating compound pages so the page order
 * does not have to be separately tracked. These objects are detected
 * in kfree() because PageSlab() is false for them.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with
 * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
 * case the low-level allocator will fragment blocks to create the proper
 * alignment. Again, objects of page-size or greater are allocated by
 * calling alloc_pages(). As SLAB objects know their size, no separate
 * size bookkeeping is necessary and there is essentially no allocation
 * space overhead, and compound pages aren't needed for multi-page
 * allocations.
 *
 * NUMA support in SLOB is fairly simplistic, pushing most of the real
 * logic down to the page allocator, and simply doing the node accounting
 * on the upper levels. In the event that a node id is explicitly
 * provided, __alloc_pages_node() with the specified node id is used
 * instead. The common case (or when the node id isn't explicitly provided)
 * will default to the current node, as per numa_node_id().
 *
 * Node-aware pages are still inserted into the global freelist, and
 * these are scanned for by matching against the node id encoded in the
 * page flags. As a result, block allocations that can be satisfied from
 * the freelist will only be done so on pages residing on the same node,
 * in order to prevent random node placement.
 */

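/*
 * Illustration (ours, not part of the original source): one slob page.
 * Only free blocks are chained; allocated blocks occupy the gaps and
 * carry no links. Offsets are stored inside the free blocks themselves
 * (see set_slob() below), starting from page->freelist:
 *
 *   page->freelist -> [free 4u] .. alloc'd .. [free 2u] .. [free 10u]
 *
 * where "u" is one SLOB_UNIT (2 or 4 bytes, depending on slobidx_t).
 */
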
#include <linux/kernel.h>
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/kmemleak.h>

#include <trace/events/kmem.h>

#include <linux/atomic.h>

#include "slab.h"
/*
 * slob_block has a field 'units', which indicates the size of the block
 * if positive, or the offset of the next free block if negative (both in
 * SLOB_UNITs).
 *
 * Free blocks of size 1 unit simply contain the offset of the next block.
 * Those with larger size contain their size in the first SLOB_UNIT of
 * memory, and the offset of the next free block in the second SLOB_UNIT.
 */
#if PAGE_SIZE <= (32767 * 2)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif

struct slob_block {
	slobidx_t units;
};
typedef struct slob_block slob_t;

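/*
 * Worked example (ours, illustrative): a free block of 3 units whose
 * next free block lives at offset 20 from the page base is encoded as
 *
 *	s[0].units = 3;		s[1].units = 20;
 *
 * while a 1-unit free block with the same successor collapses to
 *
 *	s[0].units = -20;
 *
 * This is exactly what set_slob() writes and what slob_units() and
 * slob_next() read back below.
 */
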
/*
 * All partially free slob pages go on these lists.
 */
#define SLOB_BREAK1 256
#define SLOB_BREAK2 1024
static LIST_HEAD(free_slob_small);
static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);
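/*
 * For example (ours, illustrative): a 100-byte allocation is served
 * from free_slob_small, a 512-byte one from free_slob_medium, and a
 * 2000-byte one from free_slob_large, per the size checks at the top
 * of slob_alloc() below.
 */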

/*
 * slob_page_free: true for pages on one of the free_slob_* lists.
 */
static inline int slob_page_free(struct page *sp)
{
	return PageSlobFree(sp);
}

static void set_slob_page_free(struct page *sp, struct list_head *list)
{
	list_add(&sp->lru, list);
	__SetPageSlobFree(sp);
}

static inline void clear_slob_page_free(struct page *sp)
{
	list_del(&sp->lru);
	__ClearPageSlobFree(sp);
}

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT)
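/*
 * e.g. (ours, illustrative): with a 4-byte slob_t, SLOB_UNIT == 4 and
 * SLOB_UNITS(100) == DIV_ROUND_UP(100, 4) == 25 units, while
 * SLOB_UNITS(101) rounds up to 26.
 */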

/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
	struct rcu_head head;
	int size;
};
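/*
 * Resulting layout (ours, illustrative):
 *
 *   b                            b + c->size - sizeof(struct slob_rcu)
 *   | object payload ...        | struct slob_rcu (head, size) |
 *
 * kmem_cache_free() fills in the footer and kmem_rcu_free() walks back
 * from it to recover b; see both functions below.
 */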

/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);

/*
 * Encode the given size and next info into a free slob block s.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t offset = next - base;

	if (size > 1) {
		s[0].units = size;
		s[1].units = offset;
	} else
		s[0].units = -offset;
}

/*
 * Return the size of a slob block.
 */
static slobidx_t slob_units(slob_t *s)
{
	if (s->units > 0)
		return s->units;
	return 1;
}

/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t next;

	if (s[0].units < 0)
		next = -s[0].units;
	else
		next = s[1].units;
	return base+next;
}

/*
 * Returns true if s is the last free block in its page.
 */
static int slob_last(slob_t *s)
{
	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}
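
/*
 * A minimal sketch (ours, illustrative) of how the three helpers above
 * traverse a page's free list; real callers such as slob_page_alloc()
 * do this while holding slob_lock:
 *
 *	slob_t *cur;
 *
 *	for (cur = sp->freelist; ; cur = slob_next(cur)) {
 *		pr_debug("free block of %d units\n", (int)slob_units(cur));
 *		if (slob_last(cur))
 *			break;
 *	}
 */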

static void *slob_new_pages(gfp_t gfp, int order, int node)
{
	void *page;

#ifdef CONFIG_NUMA
	if (node != NUMA_NO_NODE)
		page = __alloc_pages_node(node, gfp, order);
	else
#endif
		page = alloc_pages(gfp, order);

	if (!page)
		return NULL;

	return page_address(page);
}

static void slob_free_pages(void *b, int order)
{
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += 1 << order;
	free_pages((unsigned long)b, order);
}

/*
 * Allocate a slob block within a given slob_page sp.
 */
217
static void *slob_page_alloc(struct page *sp, size_t size, int align)
218
{
A
Américo Wang 已提交
219
	slob_t *prev, *cur, *aligned = NULL;
220 221
	int delta = 0, units = SLOB_UNITS(size);

222
	for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
N
Nick Piggin 已提交
223 224
		slobidx_t avail = slob_units(cur);

225 226 227 228
		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
N
Nick Piggin 已提交
229 230 231
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;

232
			if (delta) { /* need to fragment head to align? */
N
Nick Piggin 已提交
233 234 235
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
236 237
				prev = cur;
				cur = aligned;
N
Nick Piggin 已提交
238
				avail = slob_units(cur);
239 240
			}

N
Nick Piggin 已提交
241 242 243 244 245
			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
246
					sp->freelist = next;
N
Nick Piggin 已提交
247 248 249 250
			} else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
251
					sp->freelist = cur + units;
N
Nick Piggin 已提交
252
				set_slob(cur + units, avail - units, next);
253 254
			}

N
Nick Piggin 已提交
255 256 257
			sp->units -= units;
			if (!sp->units)
				clear_slob_page_free(sp);
258 259
			return cur;
		}
N
Nick Piggin 已提交
260 261 262 263
		if (slob_last(cur))
			return NULL;
	}
}
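
/*
 * Alignment example (ours, illustrative): with 4-byte units and
 * align == 8, a free block starting 4 bytes short of an 8-byte
 * boundary gets delta == 1; slob_page_alloc() then splits off a
 * 1-unit head fragment (which stays on the free list), returns the
 * aligned address, and re-links any tail remainder as a new free
 * block.
 */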

/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct page *sp;
	struct list_head *prev;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp, slob_list, lru) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

		/* Attempt to alloc */
		prev = sp->lru.prev;
		b = slob_page_alloc(sp, size, align);
		if (!b)
			continue;

		/* Improve fragment distribution and reduce our average
		 * search time by starting our next search here. (see
		 * Knuth vol 1, sec 2.5, pg 449) */
		if (prev != slob_list->prev &&
				slob_list->next != prev->next)
			list_move_tail(slob_list, prev->next);
		break;
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = virt_to_page(b);
		__SetPageSlab(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->freelist = b;
		INIT_LIST_HEAD(&sp->lru);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely((gfp & __GFP_ZERO) && b))
		memset(b, 0, size);
	return b;
}

/*
 * slob_free: entry point into the slob allocator.
 */
static void slob_free(void *block, int size)
{
	struct page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;
	struct list_head *slob_list;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	BUG_ON(!size);

	sp = virt_to_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* Go directly to page allocator. Do not pass slob allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		spin_unlock_irqrestore(&slob_lock, flags);
		__ClearPageSlab(sp);
		page_mapcount_reset(sp);
		slob_free_pages(b, 0);
		return;
	}

	if (!slob_page_free(sp)) {
		/* This slob page is about to become partially free. Easy! */
		sp->units = units;
		sp->freelist = b;
		set_slob(b, units,
			(void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		if (size < SLOB_BREAK1)
			slob_list = &free_slob_small;
		else if (size < SLOB_BREAK2)
			slob_list = &free_slob_medium;
		else
			slob_list = &free_slob_large;
		set_slob_page_free(sp, slob_list);
		goto out;
	}

	/*
	 * Otherwise the page is already partially free, so find reinsertion
	 * point.
	 */
	sp->units += units;

	if (b < (slob_t *)sp->freelist) {
		if (b + units == sp->freelist) {
			units += slob_units(sp->freelist);
			sp->freelist = slob_next(sp->freelist);
		}
		set_slob(b, units, sp->freelist);
		sp->freelist = b;
	} else {
		prev = sp->freelist;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}
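
/*
 * Coalescing example (ours, illustrative): freeing block B when both
 * neighbours are already free, as in [A free][B][C free]: the
 * "b + units == next" test above absorbs C into B, then the
 * "prev + slob_units(prev) == b" test folds the result into A,
 * leaving a single free block of units(A) + units(B) + units(C).
 */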

/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */

static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
{
	unsigned int *m;
	int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	void *ret;

	gfp &= gfp_allowed_mask;

	lockdep_trace_alloc(gfp);

	if (size < PAGE_SIZE - align) {
		if (!size)
			return ZERO_SIZE_PTR;

		m = slob_alloc(size + align, gfp, align, node);

		if (!m)
			return NULL;
		*m = size;
		ret = (void *)m + align;

		trace_kmalloc_node(caller, ret,
				   size, size + align, gfp, node);
	} else {
		unsigned int order = get_order(size);

		if (likely(order))
			gfp |= __GFP_COMP;
		ret = slob_new_pages(gfp, order, node);

		trace_kmalloc_node(caller, ret,
				   size, PAGE_SIZE << order, gfp, node);
	}

	kmemleak_alloc(ret, size, 1, gfp);
	return ret;
}
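
/*
 * Resulting small-object layout (ours, illustrative), with align == 8:
 *
 *   m             m + align (returned to the caller)
 *   | size | pad  | payload ... |
 *
 * kfree() and ksize() step back align bytes from the payload to find
 * the unsigned int size header; see both below.
 */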

void *__kmalloc(size_t size, gfp_t gfp)
{
	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc);

void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
{
	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
}

#ifdef CONFIG_NUMA
void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
					int node, unsigned long caller)
{
	return __do_kmalloc_node(size, gfp, node, caller);
}
#endif

void kfree(const void *block)
{
	struct page *sp;

	trace_kfree(_RET_IP_, block);

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	kmemleak_free(block);

	sp = virt_to_page(block);
	if (PageSlab(sp)) {
		int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		slob_free(m, *m + align);
	} else
		__free_pages(sp, compound_order(sp));
}
EXPORT_SYMBOL(kfree);

/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
	struct page *sp;
	int align;
	unsigned int *m;

	BUG_ON(!block);
	if (unlikely(block == ZERO_SIZE_PTR))
		return 0;

	sp = virt_to_page(block);
	if (unlikely(!PageSlab(sp)))
		return PAGE_SIZE << compound_order(sp);

	align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	m = (unsigned int *)(block - align);
	return SLOB_UNITS(*m) * SLOB_UNIT;
}
EXPORT_SYMBOL(ksize);

int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
{
	if (flags & SLAB_DESTROY_BY_RCU) {
		/* leave room for rcu footer at the end of object */
		c->size += sizeof(struct slob_rcu);
	}
	c->flags = flags;
	return 0;
}

static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
	void *b;

	flags &= gfp_allowed_mask;

	lockdep_trace_alloc(flags);

	if (c->size < PAGE_SIZE) {
		b = slob_alloc(c->size, flags, c->align, node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
					    SLOB_UNITS(c->size) * SLOB_UNIT,
					    flags, node);
	} else {
		b = slob_new_pages(flags, get_order(c->size), node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
					    PAGE_SIZE << get_order(c->size),
					    flags, node);
	}

	if (b && c->ctor)
		c->ctor(b);

	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
	return b;
}

void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	return slob_alloc_node(cachep, flags, NUMA_NO_NODE);
}
EXPORT_SYMBOL(kmem_cache_alloc);

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
	return __do_kmalloc_node(size, gfp, node, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc_node);

void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
{
	return slob_alloc_node(cachep, gfp, node);
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
#endif

static void __kmem_cache_free(void *b, int size)
{
	if (size < PAGE_SIZE)
		slob_free(b, size);
	else
		slob_free_pages(b, get_order(size));
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

	__kmem_cache_free(b, slob_rcu->size);
}

void kmem_cache_free(struct kmem_cache *c, void *b)
{
	kmemleak_free_recursive(b, c->flags);
	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
		struct slob_rcu *slob_rcu;
		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
		slob_rcu->size = c->size;
		call_rcu(&slob_rcu->head, kmem_rcu_free);
	} else {
		__kmem_cache_free(b, c->size);
	}

	trace_kmem_cache_free(_RET_IP_, b);
}
EXPORT_SYMBOL(kmem_cache_free);

void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
{
	__kmem_cache_free_bulk(s, size, p);
}
EXPORT_SYMBOL(kmem_cache_free_bulk);

int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
								void **p)
{
	return __kmem_cache_alloc_bulk(s, flags, size, p);
}
EXPORT_SYMBOL(kmem_cache_alloc_bulk);

int __kmem_cache_shutdown(struct kmem_cache *c)
{
	/* No way to check for remaining objects */
	return 0;
}

void __kmem_cache_release(struct kmem_cache *c)
{
}

int __kmem_cache_shrink(struct kmem_cache *d)
{
	return 0;
}

struct kmem_cache kmem_cache_boot = {
	.name = "kmem_cache",
	.size = sizeof(struct kmem_cache),
	.flags = SLAB_PANIC,
	.align = ARCH_KMALLOC_MINALIGN,
};

void __init kmem_cache_init(void)
{
	kmem_cache = &kmem_cache_boot;
	slab_state = UP;
}

void __init kmem_cache_init_late(void)
{
	slab_state = FULL;
}