/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * NUMA support by Paul Mundt, 2007.
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is as little as 2 bytes, however typically most architectures
 * will require 4 bytes on 32-bit and 8 bytes on 64-bit.
 *
 * The slob heap is a set of linked lists of pages from alloc_pages(),
 * and within each page, there is a singly-linked list of free blocks
 * (slob_t). The heap is grown on demand. To reduce fragmentation,
 * heap pages are segregated into three lists, with objects less than
 * 256 bytes, objects less than 1024 bytes, and all other objects.
 *
 * Allocation from the heap involves first searching for a page with
 * sufficient free blocks (using a next-fit-like approach) followed by
 * a first-fit scan of the page. Deallocation inserts objects back
 * into the free list in address order, so this is effectively an
 * address-ordered first fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are prepended with a 4-byte header with the kmalloc size.
 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * alloc_pages() directly, allocating compound pages so the page order
 * does not have to be separately tracked, and also stores the exact
 * allocation size in page->private so that it can be used to accurately
 * provide ksize(). These objects are detected in kfree() because
 * PageSlab() is false for them.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with
 * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
 * case the low-level allocator will fragment blocks to create the proper
 * alignment. Again, objects of page-size or greater are allocated by
 * calling alloc_pages(). As SLAB objects know their size, no separate
 * size bookkeeping is necessary and there is essentially no allocation
 * space overhead, and compound pages aren't needed for multi-page
 * allocations.
 *
 * NUMA support in SLOB is fairly simplistic, pushing most of the real
 * logic down to the page allocator, and simply doing the node accounting
 * on the upper levels. In the event that a node id is explicitly
 * provided, alloc_pages_exact_node() with the specified node id is used
 * instead. The common case (or when the node id isn't explicitly provided)
 * will default to the current node, as per numa_node_id().
 *
 * Node aware pages are still inserted into the global freelist, and
 * these are scanned for by matching against the node id encoded in the
 * page flags. As a result, block allocations that can be satisfied from
 * the freelist are performed only on pages residing on the same node,
 * in order to prevent random node placement.
 */
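
/*
 * Illustrative sketch of the kmalloc header described above (the
 * numbers assume a 4-byte ARCH_KMALLOC_MINALIGN; see __kmalloc_node()
 * below for the real code):
 *
 *	m = slob_alloc(100 + 4, gfp, 4, node);	// kmalloc(100)
 *	*m = 100;		// size, read back by kfree() and ksize()
 *	return (void *)m + 4;	// pointer handed to the caller
 *
 * kfree() undoes the arithmetic: m = block - 4, and the underlying
 * block is *m + 4 bytes long.
 */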

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/kmemleak.h>

#include <trace/events/kmem.h>

#include <linux/atomic.h>

/*
 * slob_block has a field 'units', which indicates size of block if +ve,
 * or offset of next block if -ve (in SLOB_UNITs).
 *
 * Free blocks of size 1 unit simply contain the offset of the next block.
 * Those with larger size contain their size in the first SLOB_UNIT of
 * memory, and the offset of the next free block in the second SLOB_UNIT.
 */
#if PAGE_SIZE <= (32767 * 2)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif
struct slob_block {
	slobidx_t units;
};
typedef struct slob_block slob_t;
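
/*
 * Worked example of the encoding above (illustrative numbers): with
 * 2-byte units, a free block of 5 units whose successor sits 40 units
 * from the page base is stored as
 *
 *	s[0].units =  5;	(size in SLOB_UNITs)
 *	s[1].units = 40;	(offset of the next free block)
 *
 * while a minimal 1-unit free block collapses to a single field:
 *
 *	s[0].units = -40;	(negated offset of the next free block)
 *
 * set_slob() writes this layout; slob_units() and slob_next() read it
 * back.
 */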

/*
 * All partially free slob pages go on these lists.
 */
#define SLOB_BREAK1 256
#define SLOB_BREAK2 1024
static LIST_HEAD(free_slob_small);
static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);

/*
 * slob_page_free: true for pages on one of the free_slob_* lists.
 */
static inline int slob_page_free(struct page *sp)
{
	return PageSlobFree(sp);
}

static void set_slob_page_free(struct page *sp, struct list_head *list)
{
	list_add(&sp->list, list);
	__SetPageSlobFree(sp);
}

static inline void clear_slob_page_free(struct page *sp)
{
	list_del(&sp->list);
	__ClearPageSlobFree(sp);
}

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES
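
/*
 * Example (a sketch assuming a 16-bit slobidx_t, so SLOB_UNIT == 2):
 *
 *	SLOB_UNITS(1)   == 1	(every object occupies at least one unit)
 *	SLOB_UNITS(100) == 50
 *	SLOB_UNITS(101) == 51	(sizes round up to whole units)
 */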

/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
	struct rcu_head head;
	int size;
};
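
/*
 * Layout sketch for such caches (illustrative; see kmem_cache_create()
 * and kmem_cache_free() below): c->size is grown by
 * sizeof(struct slob_rcu), so the footer sits at the object's tail:
 *
 *	| payload ..................... | struct slob_rcu |
 *	b				b + c->size - sizeof(struct slob_rcu)
 */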

/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);

/*
 * Encode the given size and next info into a free slob block s.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t offset = next - base;

	if (size > 1) {
		s[0].units = size;
		s[1].units = offset;
	} else
		s[0].units = -offset;
}

/*
 * Return the size of a slob block.
 */
static slobidx_t slob_units(slob_t *s)
{
	if (s->units > 0)
		return s->units;
	return 1;
}

/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t next;

	if (s[0].units < 0)
		next = -s[0].units;
	else
		next = s[1].units;
	return base+next;
}

/*
 * Returns true if s is the last free block in its page: the "next"
 * pointer of the final block lands on a page boundary, so its offset
 * within the page is zero.
 */
static int slob_last(slob_t *s)
{
	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}

static void *slob_new_pages(gfp_t gfp, int order, int node)
{
	void *page;

#ifdef CONFIG_NUMA
	if (node != -1)
		page = alloc_pages_exact_node(node, gfp, order);
	else
#endif
		page = alloc_pages(gfp, order);

	if (!page)
		return NULL;

	return page_address(page);
}

static void slob_free_pages(void *b, int order)
{
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += 1 << order;
	free_pages((unsigned long)b, order);
}

/*
 * Allocate a slob block within a given slob_page sp.
 */
static void *slob_page_alloc(struct page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);

	for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;

			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}

			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->freelist = next;
			} else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->freelist = cur + units;
				set_slob(cur + units, avail - units, next);
			}

			sp->units -= units;
			if (!sp->units)
				clear_slob_page_free(sp);
			return cur;
		}
		if (slob_last(cur))
			return NULL;
	}
}
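
/*
 * Illustrative walk-through of the alignment path above (made-up
 * numbers, 2-byte units): a 16-unit free block starts at byte offset 4
 * of its page and the caller wants 8 units aligned to 16 bytes.  Then
 * aligned = byte offset 16, delta = 6 units, and 16 >= 8 + 6, so the
 * block is split: a 6-unit fragment stays free at cur, the allocation
 * is carved from 'aligned', and the 2 units left over go back on the
 * free list.
 */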

/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct page *sp;
	struct list_head *prev;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp, slob_list, list) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != -1 && page_to_nid(sp) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

		/* Attempt to alloc */
		prev = sp->list.prev;
		b = slob_page_alloc(sp, size, align);
		if (!b)
			continue;

		/* Improve fragment distribution and reduce our average
		 * search time by starting our next search here. (see
		 * Knuth vol 1, sec 2.5, pg 449) */
		if (prev != slob_list->prev &&
				slob_list->next != prev->next)
			list_move_tail(slob_list, prev->next);
		break;
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = virt_to_page(b);
		__SetPageSlab(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->freelist = b;
		INIT_LIST_HEAD(&sp->list);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely((gfp & __GFP_ZERO) && b))
		memset(b, 0, size);
	return b;
}

/*
 * slob_free: entry point into the slob allocator.
 */
static void slob_free(void *block, int size)
{
	struct page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;
	struct list_head *slob_list;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	BUG_ON(!size);

	sp = virt_to_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* Go directly to page allocator. Do not pass slob allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		spin_unlock_irqrestore(&slob_lock, flags);
		__ClearPageSlab(sp);
		reset_page_mapcount(sp);
		slob_free_pages(b, 0);
		return;
	}

	if (!slob_page_free(sp)) {
		/* This slob page is about to become partially free. Easy! */
		sp->units = units;
		sp->freelist = b;
		set_slob(b, units,
			(void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		if (size < SLOB_BREAK1)
			slob_list = &free_slob_small;
		else if (size < SLOB_BREAK2)
			slob_list = &free_slob_medium;
		else
			slob_list = &free_slob_large;
		set_slob_page_free(sp, slob_list);
		goto out;
	}

	/*
	 * Otherwise the page is already partially free, so find reinsertion
	 * point.
	 */
	sp->units += units;

	if (b < (slob_t *)sp->freelist) {
		if (b + units == sp->freelist) {
			units += slob_units(sp->freelist);
			sp->freelist = slob_next(sp->freelist);
		}
		set_slob(b, units, sp->freelist);
		sp->freelist = b;
	} else {
		prev = sp->freelist;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}
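
/*
 * Coalescing sketch (illustrative, 2-byte units): freeing a 4-unit
 * block b at unit offset 20 when one free neighbour ends exactly at
 * offset 20 (prev + slob_units(prev) == b) and another starts at
 * offset 24 (b + units == next) merges all three into a single block:
 * b first absorbs next, then prev absorbs b.
 */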

/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */
void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
	unsigned int *m;
	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	void *ret;

	gfp &= gfp_allowed_mask;

	lockdep_trace_alloc(gfp);

	if (size < PAGE_SIZE - align) {
		if (!size)
			return ZERO_SIZE_PTR;

		m = slob_alloc(size + align, gfp, align, node);

		if (!m)
			return NULL;
		*m = size;
		ret = (void *)m + align;

		trace_kmalloc_node(_RET_IP_, ret,
				   size, size + align, gfp, node);
	} else {
		unsigned int order = get_order(size);

		if (likely(order))
			gfp |= __GFP_COMP;
		ret = slob_new_pages(gfp, order, node);
		if (ret) {
			struct page *page;
			page = virt_to_page(ret);
			page->private = size;
		}

		trace_kmalloc_node(_RET_IP_, ret,
				   size, PAGE_SIZE << order, gfp, node);
	}

	kmemleak_alloc(ret, size, 1, gfp);
	return ret;
}
EXPORT_SYMBOL(__kmalloc_node);

void kfree(const void *block)
{
	struct page *sp;

	trace_kfree(_RET_IP_, block);

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	kmemleak_free(block);

	sp = virt_to_page(block);
	if (PageSlab(sp)) {
		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		slob_free(m, *m + align);
	} else
		put_page(sp);
}
EXPORT_SYMBOL(kfree);

/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
	struct page *sp;

	BUG_ON(!block);
	if (unlikely(block == ZERO_SIZE_PTR))
		return 0;

	sp = virt_to_page(block);
	if (PageSlab(sp)) {
		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		return SLOB_UNITS(*m) * SLOB_UNIT;
	} else
		return sp->private;
}
EXPORT_SYMBOL(ksize);

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
	size_t align, unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *c;

	c = slob_alloc(sizeof(struct kmem_cache),
		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);

	if (c) {
		c->name = name;
		c->size = size;
		if (flags & SLAB_DESTROY_BY_RCU) {
			/* leave room for rcu footer at the end of object */
			c->size += sizeof(struct slob_rcu);
		}
		c->flags = flags;
		c->ctor = ctor;
		/* ignore alignment unless it's forced */
		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
		if (c->align < ARCH_SLAB_MINALIGN)
			c->align = ARCH_SLAB_MINALIGN;
		if (c->align < align)
			c->align = align;
	} else if (flags & SLAB_PANIC)
		panic("Cannot create slab cache %s\n", name);

	kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
	return c;
}
EXPORT_SYMBOL(kmem_cache_create);

void kmem_cache_destroy(struct kmem_cache *c)
{
	kmemleak_free(c);
	if (c->flags & SLAB_DESTROY_BY_RCU)
		rcu_barrier();
	slob_free(c, sizeof(struct kmem_cache));
}
EXPORT_SYMBOL(kmem_cache_destroy);

void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
	void *b;

	flags &= gfp_allowed_mask;

	lockdep_trace_alloc(flags);

	if (c->size < PAGE_SIZE) {
		b = slob_alloc(c->size, flags, c->align, node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
					    SLOB_UNITS(c->size) * SLOB_UNIT,
					    flags, node);
	} else {
		b = slob_new_pages(flags, get_order(c->size), node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
					    PAGE_SIZE << get_order(c->size),
					    flags, node);
	}

	if (c->ctor)
		c->ctor(b);

	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
	return b;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);

static void __kmem_cache_free(void *b, int size)
{
	if (size < PAGE_SIZE)
		slob_free(b, size);
	else
		slob_free_pages(b, get_order(size));
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

	__kmem_cache_free(b, slob_rcu->size);
}

void kmem_cache_free(struct kmem_cache *c, void *b)
{
	kmemleak_free_recursive(b, c->flags);
	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
		struct slob_rcu *slob_rcu;
		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
		slob_rcu->size = c->size;
		call_rcu(&slob_rcu->head, kmem_rcu_free);
	} else {
		__kmem_cache_free(b, c->size);
	}

	trace_kmem_cache_free(_RET_IP_, b);
}
EXPORT_SYMBOL(kmem_cache_free);

unsigned int kmem_cache_size(struct kmem_cache *c)
{
	return c->size;
}
EXPORT_SYMBOL(kmem_cache_size);

int kmem_cache_shrink(struct kmem_cache *d)
{
	return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);

static unsigned int slob_ready __read_mostly;

int slab_is_available(void)
{
	return slob_ready;
}

void __init kmem_cache_init(void)
{
	slob_ready = 1;
}

void __init kmem_cache_init_late(void)
{
	/* Nothing to do */
}