/*
 * File      : slab.c
 * This file is part of RT-Thread RTOS
 * COPYRIGHT (C) 2008 - 2012, RT-Thread Development Team
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rt-thread.org/license/LICENSE
 *
 * Change Logs:
 * Date           Author       Notes
 * 2008-07-12     Bernard      the first version
 * 2010-07-13     Bernard      fix RT_ALIGN issue found by kuronca
 * 2010-10-23     yi.qiu       add module memory allocator
 * 2010-12-18     yi.qiu       fix zone release bug
 */

/*
 * KERN_SLABALLOC.C	- Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <rthw.h>
#include <rtthread.h>

#define RT_MEM_STATS

#if defined (RT_USING_HEAP) && defined (RT_USING_SLAB)
/* some statistical variables */
#ifdef RT_MEM_STATS
static rt_size_t used_mem, max_mem;
#endif

#ifdef RT_USING_HOOK
static void (*rt_malloc_hook)(void *ptr, rt_size_t size);
static void (*rt_free_hook)(void *ptr);

/**
 * @addtogroup Hook
 */

/*@{*/

/**
 * This function will set a hook function, which will be invoked when a memory
 * block is allocated from heap memory.
 *
 * @param hook the hook function
 */
void rt_malloc_sethook(void (*hook)(void *ptr, rt_size_t size))
{
	rt_malloc_hook = hook;
}

/**
 * This function will set a hook function, which will be invoked when a memory
 * block is released to heap memory.
 *
 * @param hook the hook function
 */
void rt_free_sethook(void (*hook)(void *ptr))
{
	rt_free_hook = hook;
}

/*@}*/

#endif

/*
 * slab allocator implementation
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * are nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is in the chunk size
 * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical so
 * the zone size is adjusted downward on machines with less physical
 * memory.  The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.   In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
 *
 *	Alloc Size	Chunking        Number of zones
 *	0-127		8				16
 *	128-255		16				8
 *	256-511		32				8
 *	512-1023	64				8
 *	1024-2047	128				8
 *	2048-4095	256				8
 *	4096-8191	512				8
 *	8192-16383	1024			8
 *	16384-32767	2048			8
 *	(if RT_MM_PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 *	Allocations >= zone_limit go directly to kmem.
 *
 *			API REQUIREMENTS AND SIDE EFFECTS
 *
 *    To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *    have remained compatible with the following API requirements:
 *
 *    + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
 *    + all power-of-2 sized allocations are power-of-2 aligned (twe)
 *    + malloc(0) is allowed and returns non-RT_NULL (ahc driver)
 *    + ability to allocate arbitrarily large chunks of memory
 */
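
/*
 * Worked example of the chunking table above: a 100-byte request is
 * rounded up to 104 bytes (8-byte chunking) and served from zone index
 * 12, so at most 7 bytes per allocation are wasted in that band; see
 * zoneindex() below for the exact mapping.
 */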

/*
 * Chunk structure for free elements
 */
typedef struct slab_chunk
{
	struct slab_chunk *c_next;
} slab_chunk;

/*
 * The IN-BAND zone header is placed at the beginning of each zone.
 */
typedef struct slab_zone
{
	rt_int32_t	z_magic;        /* magic number for sanity check */
	rt_int32_t	z_nfree;        /* total free chunks / ualloc space in zone */
	rt_int32_t	z_nmax;         /* maximum free chunks */

	struct slab_zone *z_next;   /* zone_array[] link if z_nfree non-zero */
	rt_uint8_t	*z_baseptr;     /* pointer to start of chunk array */

	rt_int32_t	z_uindex;       /* current initial allocation index */
	rt_int32_t	z_chunksize;    /* chunk size for validation */

	rt_int32_t	z_zoneindex;    /* zone index */
	slab_chunk	*z_freechunk;   /* free chunk list */
} slab_zone;

#define ZALLOC_SLAB_MAGIC       0x51ab51ab
#define ZALLOC_ZONE_LIMIT       (16 * 1024)     /* max slab-managed alloc */
#define ZALLOC_MIN_ZONE_SIZE    (32 * 1024)     /* minimum zone size */
#define ZALLOC_MAX_ZONE_SIZE    (128 * 1024)    /* maximum zone size */
#define NZONES                  72              /* number of zones */
#define ZONE_RELEASE_THRESH     2               /* threshold number of zones */

static slab_zone *zone_array[NZONES];   /* linked list of zones with z_nfree > 0 */
static slab_zone *zone_free;            /* whole zones that have become free */

static int zone_free_cnt;
static int zone_size;
static int zone_limit;
static int zone_page_cnt;

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * RT_MM_PAGE_SIZE, or exceed the zone limit, fall through to the page allocator.
 */
#define MIN_CHUNK_SIZE      8		/* in bytes */
#define MIN_CHUNK_MASK      (MIN_CHUNK_SIZE - 1)

/*
 * Array of descriptors that describe the contents of each page
 */
#define PAGE_TYPE_FREE      0x00
#define PAGE_TYPE_SMALL     0x01
#define PAGE_TYPE_LARGE     0x02
struct memusage 
{
	rt_uint32_t type:2;		/* page type */
	rt_uint32_t size:30;		/* pages allocated or offset from zone */
};
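/*
 * One memusage entry describes one heap page.  btokup() maps an address
 * to its page descriptor: for PAGE_TYPE_LARGE pages, size holds the page
 * count of the allocation; for PAGE_TYPE_SMALL pages, size is the page
 * offset back to the owning zone header.
 */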
static struct memusage *memusage = RT_NULL;
#define btokup(addr)	(&memusage[((rt_uint32_t)(addr) - heap_start) >> RT_MM_PAGE_BITS])

static rt_uint32_t heap_start, heap_end;

/* page allocator */
struct rt_page_head
{
	struct rt_page_head *next;      /* next valid page */
	rt_size_t page;                 /* number of pages */

	/* dummy */
	char dummy[RT_MM_PAGE_SIZE - (sizeof(struct rt_page_head*) + sizeof (rt_size_t))];
};
static struct rt_page_head *rt_page_list;
static struct rt_semaphore heap_sem;

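/*
 * Allocate 'npages' contiguous pages from the free page list using a
 * first-fit search; returns RT_NULL when no free block is large enough.
 */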
void *rt_page_alloc(rt_size_t npages)
{
	struct rt_page_head *b, *n;
	struct rt_page_head **prev;

	if (npages == 0)
		return RT_NULL;

	/* lock heap */
	rt_sem_take(&heap_sem, RT_WAITING_FOREVER);
	for (prev = &rt_page_list; (b = *prev) != RT_NULL; prev = &(b->next))
	{
		if (b->page > npages)
		{
			/* split pages */
			n = b + npages;
			n->next = b->next;
			n->page = b->page - npages;
			*prev = n;
			break;
		}

		if (b->page == npages)
		{
			/* this node fit, remove this node */
			*prev = b->next;
			break;
		}
	}

	/* unlock heap */
	rt_sem_release(&heap_sem);

	return b;
}

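/*
 * Return 'npages' pages starting at 'addr' to the free page list,
 * merging with adjacent free blocks to limit fragmentation.
 */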
void rt_page_free(void *addr, rt_size_t npages)
{
	struct rt_page_head *b, *n;
	struct rt_page_head **prev;

	RT_ASSERT(addr != RT_NULL);
	RT_ASSERT((rt_uint32_t)addr % RT_MM_PAGE_SIZE == 0);
	RT_ASSERT(npages != 0);

	n = (struct rt_page_head *)addr;

	/* lock heap */
	rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

	for (prev = &rt_page_list; (b = *prev) != RT_NULL; prev = &(b->next))
	{
		RT_ASSERT(b->page > 0);
		RT_ASSERT(b > n || b + b->page <= n);

		if (b + b->page == n)
		{
			if (b + (b->page += npages) == b->next)
			{
				b->page += b->next->page;
				b->next  = b->next->next;
			}

			goto _return;
		}

		if (b == n + npages)
		{
			n->page = b->page + npages;
			n->next = b->next;
			*prev = n;

			goto _return;
		}

		if (b > n + npages)
			break;
	}

	n->page = npages;
	n->next = b;
	*prev = n;

_return:
	/* unlock heap */
	rt_sem_release(&heap_sem);
}

/*
 * Initialize the page allocator
 */
static void rt_page_init(void *addr, rt_size_t npages)
{
	RT_ASSERT(addr != RT_NULL);
	RT_ASSERT(npages != 0);

	rt_page_list = RT_NULL;
	rt_page_free(addr, npages);
}

/**
 * @ingroup SystemInit
 *
 * This function will initialize the system heap
 *
 * @param begin_addr the beginning address of system page
 * @param end_addr the end address of system page
 */
void rt_system_heap_init(void *begin_addr, void *end_addr)
{
	rt_uint32_t limsize, npages;

	RT_DEBUG_NOT_IN_INTERRUPT;

	/* align begin and end addr to page */
	heap_start = RT_ALIGN((rt_uint32_t)begin_addr, RT_MM_PAGE_SIZE);
	heap_end   = RT_ALIGN_DOWN((rt_uint32_t)end_addr, RT_MM_PAGE_SIZE);

	if (heap_start >= heap_end)
	{
		rt_kprintf("rt_system_heap_init, wrong address[0x%x - 0x%x]\n", 
			(rt_uint32_t)begin_addr, (rt_uint32_t)end_addr);

		return;
	}
	
	limsize = heap_end - heap_start;
	npages = limsize / RT_MM_PAGE_SIZE;

	/* initialize heap semaphore */
	rt_sem_init(&heap_sem, "heap", 1, RT_IPC_FLAG_FIFO);

	RT_DEBUG_LOG(RT_DEBUG_SLAB,
		("heap[0x%x - 0x%x], size 0x%x, 0x%x pages\n", heap_start, heap_end, limsize, npages));

	/* init pages */
	rt_page_init((void *)heap_start, npages);

	/* calculate zone size */
	zone_size = ZALLOC_MIN_ZONE_SIZE;
	while (zone_size < ZALLOC_MAX_ZONE_SIZE && (zone_size << 1) < (limsize/1024))
		zone_size <<= 1;

	zone_limit = zone_size / 4;
	if (zone_limit > ZALLOC_ZONE_LIMIT)
		zone_limit = ZALLOC_ZONE_LIMIT;

	zone_page_cnt = zone_size / RT_MM_PAGE_SIZE;

	RT_DEBUG_LOG(RT_DEBUG_SLAB,
		("zone size 0x%x, zone page count 0x%x\n", zone_size, zone_page_cnt));

	/* allocate memusage array */
	limsize = npages * sizeof(struct memusage);
	limsize = RT_ALIGN(limsize, RT_MM_PAGE_SIZE);
	memusage = rt_page_alloc(limsize/RT_MM_PAGE_SIZE);

	RT_DEBUG_LOG(RT_DEBUG_SLAB,
		("memusage 0x%x, size 0x%x\n", (rt_uint32_t)memusage, limsize));
}
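
/*
 * Usage sketch (illustrative only; HEAP_BEGIN/HEAP_END are hypothetical
 * symbols, normally supplied by the linker script or board support
 * package).  Startup code calls this once before the scheduler runs:
 *
 *     rt_system_heap_init((void *)HEAP_BEGIN, (void *)HEAP_END);
 */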

/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
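/*
 * For example (values follow from the rounding code below):
 *   *bytes = 100  -> rounded to 104,  returns zone index 12
 *   *bytes = 300  -> rounded to 320,  returns zone index 25
 *   *bytes = 5000 -> rounded to 5120, returns zone index 57
 */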
rt_inline int zoneindex(rt_uint32_t *bytes)
{
	rt_uint32_t n = (rt_uint32_t)*bytes;	/* unsigned for shift opt */

	if (n < 128)
	{
		*bytes = n = (n + 7) & ~7;

		return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
	}
	if (n < 256)
	{
		*bytes = n = (n + 15) & ~15;

		return(n / 16 + 7);
	}
	if (n < 8192)
	{
		if (n < 512)
		{
			*bytes = n = (n + 31) & ~31;

			return(n / 32 + 15);
		}
		if (n < 1024)
		{
			*bytes = n = (n + 63) & ~63;

			return(n / 64 + 23);
		}
		if (n < 2048)
		{
			*bytes = n = (n + 127) & ~127;

			return(n / 128 + 31);
		}
		if (n < 4096)
		{
			*bytes = n = (n + 255) & ~255;

			return(n / 256 + 39);
		}
		*bytes = n = (n + 511) & ~511;

		return(n / 512 + 47);
	}
	if (n < 16384)
	{
		*bytes = n = (n + 1023) & ~1023;

		return(n / 1024 + 55);
	}

	rt_kprintf("Unexpected byte count %d", n);

	return 0;
}

/**
 * @addtogroup MM
 */

/*@{*/

/**
 * This function will allocate a block from system heap memory.
 * - If size is zero, or
 * - If there is no memory of the requested size available in the system,
 * RT_NULL is returned.
 *
 * @param size the size of memory to be allocated
 *
 * @return the allocated memory
 */
void *rt_malloc(rt_size_t size)
{
	slab_zone *z;
	rt_int32_t zi;
	slab_chunk *chunk;
	struct memusage *kup;

	/* zero size, return RT_NULL */
	if (size == 0)
		return RT_NULL;

#ifdef RT_USING_MODULE
	if (rt_module_self() != RT_NULL)
		return rt_module_malloc(size);
#endif

	/*
	 * Handle large allocations directly.  There should not be very many of
	 * these so performance is not a big issue.
	 */
	if (size >= zone_limit)
	{
		size = RT_ALIGN(size, RT_MM_PAGE_SIZE);

		chunk = rt_page_alloc(size >> RT_MM_PAGE_BITS);
		if (chunk == RT_NULL)
			return RT_NULL;

		/* set kup */
		kup = btokup(chunk);
		kup->type = PAGE_TYPE_LARGE;
		kup->size = size >> RT_MM_PAGE_BITS;

		RT_DEBUG_LOG(RT_DEBUG_SLAB, ("malloc a large memory 0x%x, page cnt %d, kup %d\n",
			size,
			size >> RT_MM_PAGE_BITS,
			((rt_uint32_t)chunk - heap_start) >> RT_MM_PAGE_BITS));

		/* lock heap */
		rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

#ifdef RT_MEM_STATS
		used_mem += size;
		if (used_mem > max_mem)
			max_mem = used_mem;
#endif
		goto done;
	}

	/* lock heap */
	rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

	/*
	 * Attempt to allocate out of an existing zone.  First try the free list,
	 * then allocate out of unallocated space.  If we find a good zone move
	 * it to the head of the list so later allocations find it quickly
	 * (we might have thousands of zones in the list).
	 *
	 * Note: zoneindex() will report an error if size is too large.
	 */
	zi = zoneindex(&size);
	RT_ASSERT(zi < NZONES);

	RT_DEBUG_LOG(RT_DEBUG_SLAB, ("try to malloc 0x%x on zone: %d\n", size, zi));

	if ((z = zone_array[zi]) != RT_NULL)
	{
		RT_ASSERT(z->z_nfree > 0);

		/* Remove us from the zone_array[] when we become empty */
		if (--z->z_nfree == 0)
		{
			zone_array[zi] = z->z_next;
			z->z_next = RT_NULL;
		}

		/*
		 * No chunks are available but nfree said we had some memory, so
		 * it must be available in the never-before-used-memory area
		 * governed by uindex.  The consequences are very serious if our zone
		 * got corrupted so we use an explicit rt_kprintf rather than a KASSERT.
		 */
		if (z->z_uindex + 1 != z->z_nmax)
		{
			z->z_uindex = z->z_uindex + 1;
			chunk = (slab_chunk *)(z->z_baseptr + z->z_uindex * size);
		}
		else
		{
			/* find on free chunk list */
			chunk = z->z_freechunk;

			/* remove this chunk from list */
			z->z_freechunk = z->z_freechunk->c_next;
		}

#ifdef RT_MEM_STATS
		used_mem += z->z_chunksize;
		if (used_mem > max_mem)
			max_mem = used_mem;
#endif

		goto done;
	}

	/*
	 * If all zones are exhausted we need to allocate a new zone for this
	 * index.
	 *
	 * At least one subsystem, the tty code (see CROUND) expects power-of-2
	 * allocations to be power-of-2 aligned.  We maintain compatibility by
	 * adjusting the base offset below.
	 */
	{
		rt_int32_t off;

		if ((z = zone_free) != RT_NULL)
		{
			/* remove zone from free zone list */
			zone_free = z->z_next;
			-- zone_free_cnt;
		}
		else
		{
			/* unlock heap, since page allocator will think about lock */
			rt_sem_release(&heap_sem);

			/* allocate a zone from page */
			z = rt_page_alloc(zone_size / RT_MM_PAGE_SIZE);
			if (z == RT_NULL)
				goto fail;

			/* lock heap */
			rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

			RT_DEBUG_LOG(RT_DEBUG_SLAB, ("alloc a new zone: 0x%x\n", (rt_uint32_t)z));

			/* set memory usage */
			for (off = 0, kup = btokup(z); off < zone_page_cnt; off ++)
			{
				kup->type = PAGE_TYPE_SMALL;
				kup->size = off;

				kup ++;
			}
		}

		/* clear to zero */
		rt_memset(z, 0, sizeof(slab_zone));

		/* offset of slab zone struct in zone */
		off = sizeof(slab_zone);

		/*
		 * Guarantee power-of-2 alignment for power-of-2-sized chunks.
		 * Otherwise just 8-byte align the data.  The test below holds
		 * only for powers of two, where size | (size - 1) sets all the
		 * low bits and so equals (size << 1) - 1.
		 */
		if ((size | (size - 1)) + 1 == (size << 1))
			off = (off + size - 1) & ~(size - 1);
		else
			off = (off + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;

		z->z_magic     = ZALLOC_SLAB_MAGIC;
		z->z_zoneindex = zi;
		z->z_nmax      = (zone_size - off) / size;
		z->z_nfree     = z->z_nmax - 1;
		z->z_baseptr   = (rt_uint8_t *)z + off;
		z->z_uindex    = 0;
		z->z_chunksize = size;

		chunk = (slab_chunk *)(z->z_baseptr + z->z_uindex * size);

		/* link to zone array */
		z->z_next = zone_array[zi];
		zone_array[zi] = z;

#ifdef RT_MEM_STATS
		used_mem += z->z_chunksize;
		if (used_mem > max_mem)
			max_mem = used_mem;
#endif
	}

done:
	rt_sem_release(&heap_sem);

	RT_OBJECT_HOOK_CALL(rt_malloc_hook, ((char *)chunk, size));

	return chunk;

fail:
	rt_sem_release(&heap_sem);

	return RT_NULL;
}
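
/*
 * Usage sketch (illustrative only):
 *
 *     void *p = rt_malloc(100);        // zone chunk (104 bytes, zone 12)
 *     void *q = rt_malloc(64 * 1024);  // >= zone_limit, whole pages
 *     rt_free(p);
 *     rt_free(q);
 */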

/**
 * This function will change the size of previously allocated memory block.
 *
 * @param ptr the previously allocated memory block
 * @param size the new size of memory block
 *
 * @return the allocated memory
 */
void *rt_realloc(void *ptr, rt_size_t size)
{
	void *nptr;
	slab_zone *z;
	struct memusage *kup;

	if (ptr == RT_NULL)
		return rt_malloc(size);
	if (size == 0)
	{
		rt_free(ptr);

		return RT_NULL;
	}

#ifdef RT_USING_MODULE
	if (rt_module_self() != RT_NULL)
		return rt_module_realloc(ptr, size);
#endif

	/*
	 * Get the original allocation's zone.  If the new request winds up
	 * using the same chunk size we do not have to do anything.
	 */
	kup = btokup((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK);
	if (kup->type == PAGE_TYPE_LARGE)
	{
		rt_size_t osize;

		osize = kup->size << RT_MM_PAGE_BITS;
		if ((nptr = rt_malloc(size)) == RT_NULL)
			return RT_NULL;
		rt_memcpy(nptr, ptr, size > osize ? osize : size);
		rt_free(ptr);

		return nptr;
	}
	else if (kup->type == PAGE_TYPE_SMALL)
	{
		z = (slab_zone *)(((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK) - kup->size * RT_MM_PAGE_SIZE);
		RT_ASSERT(z->z_magic == ZALLOC_SLAB_MAGIC);

		zoneindex(&size);
		if (z->z_chunksize == size)
			return(ptr); /* same chunk */

		/*
		 * Allocate memory for the new request size.  Note that zoneindex has
		 * already adjusted the request size to the appropriate chunk size, which
		 * should optimize our rt_memcpy().  Then copy and return the new pointer.
		 */
		if ((nptr = rt_malloc(size)) == RT_NULL)
			return RT_NULL;

		rt_memcpy(nptr, ptr, size > z->z_chunksize ? z->z_chunksize : size);
		rt_free(ptr);

		return nptr;
	}

	return RT_NULL;
}

/**
 * This function will contiguously allocate enough space for count objects
 * that are size bytes of memory each and returns a pointer to the allocated
 * memory.
 *
 * The allocated memory is filled with bytes of value zero.
 *
 * @param count number of objects to allocate
 * @param size size of the objects to allocate
 *
 * @return pointer to allocated memory / NULL pointer if there is an error
 */
void *rt_calloc(rt_size_t count, rt_size_t size)
{
	void *p;

	/* allocate 'count' objects of size 'size' */
	p = rt_malloc(count * size);

	/* zero the memory */
	if (p)
		rt_memset(p, 0, count * size);

	return p;
}

/**
 * This function will release the previously allocated memory block by rt_malloc.
 * The released memory block is taken back to system heap.
 *
 * @param ptr the address of memory which will be released
 */
void rt_free(void *ptr)
{
	slab_zone *z;
	slab_chunk *chunk;
	struct memusage *kup;

	/* free a RT_NULL pointer */
	if (ptr == RT_NULL)
		return;

	RT_OBJECT_HOOK_CALL(rt_free_hook, (ptr));

#ifdef RT_USING_MODULE
	if (rt_module_self() != RT_NULL)
	{
		rt_module_free(rt_module_self(), ptr); 

		return;
	}
#endif

	/* get memory usage */
#if RT_DEBUG_SLAB
	{
		rt_uint32_t addr = ((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK);
		RT_DEBUG_LOG(RT_DEBUG_SLAB,
			("free a memory 0x%x and align to 0x%x, kup index %d\n",
			(rt_uint32_t)ptr,
			(rt_uint32_t)addr,
			((rt_uint32_t)(addr) - heap_start) >> RT_MM_PAGE_BITS));
	}
#endif

	kup = btokup((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK);
	/* release large allocation */
	if (kup->type == PAGE_TYPE_LARGE)
	{
		rt_uint32_t size;

		/* lock heap */
		rt_sem_take(&heap_sem, RT_WAITING_FOREVER);
		/* clear page counter */
		size = kup->size;
		kup->size = 0;

#ifdef RT_MEM_STATS
		used_mem -= size * RT_MM_PAGE_SIZE;
#endif
		rt_sem_release(&heap_sem);

		RT_DEBUG_LOG(RT_DEBUG_SLAB,
			("free large memory block 0x%x, page count %d\n", (rt_uint32_t)ptr, size));

		/* free this page */
		rt_page_free(ptr, size);

		return;
	}

	/* lock heap */
	rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

	/* zone case: locate the owning zone */
	z = (slab_zone *)(((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK) - kup->size * RT_MM_PAGE_SIZE);
	RT_ASSERT(z->z_magic == ZALLOC_SLAB_MAGIC);

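	/*
	 * The freed chunk itself stores the free-list link, so the free
	 * list needs no memory beyond the chunks themselves.
	 */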
	chunk = (slab_chunk *)ptr;
	chunk->c_next = z->z_freechunk;
	z->z_freechunk = chunk;

#ifdef RT_MEM_STATS
	used_mem -= z->z_chunksize;
#endif

	/*
	 * Bump the number of free chunks.  If it becomes non-zero the zone
	 * must be added back onto the appropriate list.
	 */
	if (z->z_nfree++ == 0)
	{
		z->z_next = zone_array[z->z_zoneindex];
		zone_array[z->z_zoneindex] = z;
	}

	/*
	 * If the zone becomes totally free, and there are other zones we
	 * can allocate from, move this zone to the FreeZones list.  Since
	 * this code can be called from an IPI callback, do *NOT* try to mess
	 * with kernel_map here.  Hysteresis will be performed at malloc() time.
	 */
	if (z->z_nfree == z->z_nmax && (z->z_next || zone_array[z->z_zoneindex] != z))
	{
		slab_zone **pz;

		RT_DEBUG_LOG(RT_DEBUG_SLAB, ("free zone 0x%x, zone index %d\n", (rt_uint32_t)z, z->z_zoneindex));

		/* remove zone from zone array list */
		for (pz = &zone_array[z->z_zoneindex]; z != *pz; pz = &(*pz)->z_next)
			;
		*pz = z->z_next;

		/* reset zone */
		z->z_magic = -1;

		/* insert to free zone list */
		z->z_next = zone_free;
		zone_free = z;

		++ zone_free_cnt;

		/* release zone to page allocator */
		if (zone_free_cnt > ZONE_RELEASE_THRESH)
		{
			register rt_base_t i;

			z = zone_free;
			zone_free = z->z_next;
			-- zone_free_cnt;

			/* set memory usage */
			for (i = 0, kup = btokup(z); i < zone_page_cnt; i ++)
			{
				kup->type = PAGE_TYPE_FREE;
				kup->size = 0;
				kup ++;
			}

			/* unlock heap */
			rt_sem_release(&heap_sem);

			/* release pages */
			rt_page_free(z, zone_size / RT_MM_PAGE_SIZE);

			return;
		}
	}
	/* unlock heap */
	rt_sem_release(&heap_sem);
}

#ifdef RT_MEM_STATS
void rt_memory_info(rt_uint32_t *total, rt_uint32_t *used, rt_uint32_t *max_used)
{
	if (total != RT_NULL)
		*total = heap_end - heap_start;

	if (used  != RT_NULL)
		*used = used_mem;

	if (max_used != RT_NULL)
		*max_used = max_mem;
}

#ifdef RT_USING_FINSH
#include <finsh.h>
void list_mem(void)
{
	rt_kprintf("total memory: %d\n", heap_end - heap_start);
	rt_kprintf("used memory : %d\n", used_mem);
	rt_kprintf("maximum allocated memory: %d\n", max_mem);
}
FINSH_FUNCTION_EXPORT(list_mem, list memory usage information)
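/*
 * Example finsh session (the numbers are illustrative only):
 *
 *     finsh>> list_mem()
 *     total memory: 1048576
 *     used memory : 11312
 *     maximum allocated memory: 13240
 */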
#endif
#endif

/*@}*/

#endif