/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 * File      : slab.c
 *
 * Change Logs:
 * Date           Author       Notes
 * 2008-07-12     Bernard      the first version
 * 2010-07-13     Bernard      fix RT_ALIGN issue found by kuronca
 * 2010-10-23     yi.qiu       add module memory allocator
 * 2010-12-18     yi.qiu       fix zone release bug
 */

/*
 * KERN_SLABALLOC.C - Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <rthw.h>
#include <rtthread.h>

#define RT_MEM_STATS

#if defined (RT_USING_HEAP) && defined (RT_USING_SLAB)
/* some statistical variables */
#ifdef RT_MEM_STATS
static rt_size_t used_mem, max_mem;
#endif

#ifdef RT_USING_HOOK
static void (*rt_malloc_hook)(void *ptr, rt_size_t size);
static void (*rt_free_hook)(void *ptr);

/**
 * @addtogroup Hook
 */

/**@{*/

/**
 * This function will set a hook function, which will be invoked when a memory
 * block is allocated from heap memory.
 *
 * @param hook the hook function
 */
void rt_malloc_sethook(void (*hook)(void *ptr, rt_size_t size))
{
    rt_malloc_hook = hook;
}
RTM_EXPORT(rt_malloc_sethook);

/**
 * This function will set a hook function, which will be invoked when a memory
 * block is released to heap memory.
 *
 * @param hook the hook function
 */
void rt_free_sethook(void (*hook)(void *ptr))
{
    rt_free_hook = hook;
}
RTM_EXPORT(rt_free_sethook);
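
/*
 * Usage sketch (illustrative, not part of this module): install tracing
 * hooks so that every allocation and free is logged. The hook names below
 * are hypothetical application code.
 *
 *     static void trace_malloc(void *ptr, rt_size_t size)
 *     {
 *         rt_kprintf("malloc: %d bytes at 0x%x\n", size, (rt_uint32_t)ptr);
 *     }
 *
 *     static void trace_free(void *ptr)
 *     {
 *         rt_kprintf("free  : 0x%x\n", (rt_uint32_t)ptr);
 *     }
 *
 *     rt_malloc_sethook(trace_malloc);
 *     rt_free_sethook(trace_free);
 */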
/**@}*/

#endif

/*
 * slab allocator implementation
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * is nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is in the chunk size
 * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical so
 * the zone size is adjusted downward on machines with less physical
 * memory.  The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.   In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
 *
 *  Alloc Size  Chunking        Number of zones
 *  0-127       8               16
 *  128-255     16              8
 *  256-511     32              8
 *  512-1023    64              8
 *  1024-2047   128             8
 *  2048-4095   256             8
 *  4096-8191   512             8
 *  8192-16383  1024            8
 *  16384-32767 2048            8
 *  (if RT_MM_PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 *  Allocations >= zone_limit go directly to kmem.
 *
 *          API REQUIREMENTS AND SIDE EFFECTS
 *
 *    To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *    have remained compatible with the following API requirements:
 *
 *    + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
 *    + all power-of-2 sized allocations are power-of-2 aligned (twe)
 *    + malloc(0) is allowed and returns non-RT_NULL (ahc driver)
 *      (note: this port returns RT_NULL for zero-size requests instead)
 *    + ability to allocate arbitrarily large chunks of memory
 */
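
/*
 * Worked example (editorial note): a 100-byte request falls in the 0-127
 * band above, so it is rounded up to the next multiple of 8 (104 bytes)
 * and served from the zone that holds 104-byte chunks. The few bytes lost
 * to rounding plus the in-band zone header are the bounded, worst-case
 * overhead described above.
 */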

/*
 * Chunk structure for free elements
 */
typedef struct slab_chunk
{
    struct slab_chunk *c_next;
} slab_chunk;

/*
 * The IN-BAND zone header is placed at the beginning of each zone.
 */
typedef struct slab_zone
{
    rt_int32_t  z_magic;        /* magic number for sanity check */
    rt_int32_t  z_nfree;        /* total free chunks / ualloc space in zone */
    rt_int32_t  z_nmax;         /* maximum free chunks */
    struct slab_zone *z_next;   /* zoneary[] link if z_nfree non-zero */
    rt_uint8_t  *z_baseptr;     /* pointer to start of chunk array */
    rt_int32_t  z_uindex;       /* current initial allocation index */
    rt_int32_t  z_chunksize;    /* chunk size for validation */
    rt_int32_t  z_zoneindex;    /* zone index */
    slab_chunk  *z_freechunk;   /* free chunk list */
} slab_zone;

#define ZALLOC_SLAB_MAGIC       0x51ab51ab
#define ZALLOC_ZONE_LIMIT       (16 * 1024)     /* max slab-managed alloc */
#define ZALLOC_MIN_ZONE_SIZE    (32 * 1024)     /* minimum zone size */
#define ZALLOC_MAX_ZONE_SIZE    (128 * 1024)    /* maximum zone size */
#define NZONES                  72              /* number of zones */
#define ZONE_RELEASE_THRESH     2               /* threshold number of zones */
static slab_zone *zone_array[NZONES];   /* linked list of zones NFree > 0 */
static slab_zone *zone_free;            /* whole zones that have become free */

static int zone_free_cnt;
static int zone_size;
static int zone_limit;
static int zone_page_cnt;

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * RT_MM_PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 */
#define MIN_CHUNK_SIZE      8       /* in bytes */
#define MIN_CHUNK_MASK      (MIN_CHUNK_SIZE - 1)

/*
 * Array of descriptors that describe the contents of each page
 */
#define PAGE_TYPE_FREE      0x00
#define PAGE_TYPE_SMALL     0x01
#define PAGE_TYPE_LARGE     0x02
struct memusage
{
    rt_uint32_t type: 2;        /* page type */
    rt_uint32_t size: 30;       /* pages allocated or offset from zone */
};
static struct memusage *memusage = RT_NULL;
#define btokup(addr)    \
    (&memusage[((rt_uint32_t)(addr) - heap_start) >> RT_MM_PAGE_BITS])
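
/*
 * Illustrative example (addresses made up): with 4 KB pages
 * (RT_MM_PAGE_BITS == 12), an address heap_start + 0x5000 lies in the
 * sixth heap page, so btokup() yields &memusage[5]:
 *
 *     struct memusage *kup = btokup(heap_start + 0x5000);
 */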

static rt_uint32_t heap_start, heap_end;

/* page allocator */
struct rt_page_head
{
    struct rt_page_head *next;      /* next valid page */
    rt_size_t page;                 /* number of page  */

    /* dummy */
    char dummy[RT_MM_PAGE_SIZE - (sizeof(struct rt_page_head *) + sizeof(rt_size_t))];
};
static struct rt_page_head *rt_page_list;
static struct rt_semaphore heap_sem;

void *rt_page_alloc(rt_size_t npages)
{
    struct rt_page_head *b, *n;
    struct rt_page_head **prev;

    if (npages == 0)
        return RT_NULL;

    /* lock heap */
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);
    for (prev = &rt_page_list; (b = *prev) != RT_NULL; prev = &(b->next))
    {
        if (b->page > npages)
        {
            /* split pages */
            n       = b + npages;
            n->next = b->next;
            n->page = b->page - npages;
            *prev   = n;
            break;
        }

        if (b->page == npages)
        {
            /* this node fit, remove this node */
            *prev = b->next;
            break;
        }
    }

    /* unlock heap */
    rt_sem_release(&heap_sem);

    return b;
}

void rt_page_free(void *addr, rt_size_t npages)
{
    struct rt_page_head *b, *n;
    struct rt_page_head **prev;
    RT_ASSERT(addr != RT_NULL);
    RT_ASSERT((rt_uint32_t)addr % RT_MM_PAGE_SIZE == 0);
    RT_ASSERT(npages != 0);
    n = (struct rt_page_head *)addr;
    /* lock heap */
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);
    for (prev = &rt_page_list; (b = *prev) != RT_NULL; prev = &(b->next))
    {
        RT_ASSERT(b->page > 0);
        RT_ASSERT(b > n || b + b->page <= n);
        if (b + b->page == n)
        {
            if (b + (b->page += npages) == b->next)
            {
                b->page += b->next->page;
                b->next  = b->next->next;
            }
            goto _return;
        }
        if (b == n + npages)
        {
            n->page = b->page + npages;
            n->next = b->next;
            *prev   = n;
            goto _return;
        }
        if (b > n + npages)
            break;
    }
    n->page = npages;
    n->next = b;
    *prev   = n;

_return:
    /* unlock heap */
    rt_sem_release(&heap_sem);
}
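
/*
 * Usage sketch (illustrative): take two pages from the low-level page
 * allocator and hand them back. The caller gets page-aligned memory and
 * must remember the page count, since rt_page_free() takes it again.
 *
 *     void *pages = rt_page_alloc(2);
 *     if (pages != RT_NULL)
 *     {
 *         use the 2 * RT_MM_PAGE_SIZE bytes here ...
 *         rt_page_free(pages, 2);
 *     }
 */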

/*
 * Initialize the page allocator
 */
static void rt_page_init(void *addr, rt_size_t npages)
{
    RT_ASSERT(addr != RT_NULL);
    RT_ASSERT(npages != 0);
    rt_page_list = RT_NULL;
    rt_page_free(addr, npages);
}

/**
 * @ingroup SystemInit
 *
 * This function will init system heap
 *
 * @param begin_addr the beginning address of system page
 * @param end_addr the end address of system page
 */
void rt_system_heap_init(void *begin_addr, void *end_addr)
{
    rt_uint32_t limsize, npages;

    RT_DEBUG_NOT_IN_INTERRUPT;

    /* align begin and end addr to page */
    heap_start = RT_ALIGN((rt_uint32_t)begin_addr, RT_MM_PAGE_SIZE);
    heap_end   = RT_ALIGN_DOWN((rt_uint32_t)end_addr, RT_MM_PAGE_SIZE);
    if (heap_start >= heap_end)
    {
        rt_kprintf("rt_system_heap_init, wrong address[0x%x - 0x%x]\n",
                   (rt_uint32_t)begin_addr, (rt_uint32_t)end_addr);
        return;
    }
    limsize = heap_end - heap_start;
    npages  = limsize / RT_MM_PAGE_SIZE;
    /* initialize heap semaphore */
    rt_sem_init(&heap_sem, "heap", 1, RT_IPC_FLAG_FIFO);

    RT_DEBUG_LOG(RT_DEBUG_SLAB, ("heap[0x%x - 0x%x], size 0x%x, 0x%x pages\n",
                                 heap_start, heap_end, limsize, npages));

    /* init pages */
    rt_page_init((void *)heap_start, npages);
    /* calculate zone size */
    zone_size = ZALLOC_MIN_ZONE_SIZE;
    while (zone_size < ZALLOC_MAX_ZONE_SIZE && (zone_size << 1) < (limsize / 1024))
        zone_size <<= 1;

    zone_limit = zone_size / 4;
    if (zone_limit > ZALLOC_ZONE_LIMIT)
        zone_limit = ZALLOC_ZONE_LIMIT;

    zone_page_cnt = zone_size / RT_MM_PAGE_SIZE;

    RT_DEBUG_LOG(RT_DEBUG_SLAB, ("zone size 0x%x, zone page count 0x%x\n",
                                 zone_size, zone_page_cnt));

    /* allocate memusage array */
    limsize  = npages * sizeof(struct memusage);
    limsize  = RT_ALIGN(limsize, RT_MM_PAGE_SIZE);
    memusage = rt_page_alloc(limsize / RT_MM_PAGE_SIZE);

    RT_DEBUG_LOG(RT_DEBUG_SLAB, ("memusage 0x%x, size 0x%x\n",
                                 (rt_uint32_t)memusage, limsize));
}
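
/*
 * Usage sketch (illustrative): a board support package typically calls
 * this once at startup with its heap bounds. The symbol names below are
 * hypothetical linker-provided symbols.
 *
 *     extern int __bss_end, __heap_top;
 *     rt_system_heap_init((void *)&__bss_end, (void *)&__heap_top);
 */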

/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
rt_inline int zoneindex(rt_uint32_t *bytes)
{
    /* unsigned for shift opt */
    rt_uint32_t n = (rt_uint32_t)*bytes;

    if (n < 128)
    {
        *bytes = n = (n + 7) & ~7;

        /* 8 byte chunks, 16 zones */
        return (n / 8 - 1);
    }
    if (n < 256)
    {
        *bytes = n = (n + 15) & ~15;

        return (n / 16 + 7);
    }
    if (n < 8192)
    {
        if (n < 512)
        {
            *bytes = n = (n + 31) & ~31;

            return (n / 32 + 15);
        }
        if (n < 1024)
        {
            *bytes = n = (n + 63) & ~63;

            return (n / 64 + 23);
        }
        if (n < 2048)
        {
            *bytes = n = (n + 127) & ~127;

            return (n / 128 + 31);
        }
        if (n < 4096)
        {
            *bytes = n = (n + 255) & ~255;

            return (n / 256 + 39);
        }
        *bytes = n = (n + 511) & ~511;

        return (n / 512 + 47);
    }
    if (n < 16384)
    {
        *bytes = n = (n + 1023) & ~1023;

        return (n / 1024 + 55);
    }

    rt_kprintf("Unexpected byte count %d", n);

    return 0;
}
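
/*
 * Sanity examples (editorial) for the mapping above, consistent with the
 * chunking table in the header comment:
 *
 *     rt_uint32_t sz;
 *     sz = 1;    zoneindex(&sz);    sz is now 8,    index 0
 *     sz = 100;  zoneindex(&sz);    sz is now 104,  index 12
 *     sz = 200;  zoneindex(&sz);    sz is now 208,  index 20
 *     sz = 5000; zoneindex(&sz);    sz is now 5120, index 57
 */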

/**
 * @addtogroup MM
 */

/**@{*/

/**
 * This function will allocate a memory block from the system heap.
 * RT_NULL is returned if the requested size is zero or if no memory
 * block of the requested size is available in the system.
 *
 * @param size the size of memory to be allocated
 *
 * @return the allocated memory block, or RT_NULL on failure
 */
void *rt_malloc(rt_size_t size)
{
    slab_zone *z;
    rt_int32_t zi;
    slab_chunk *chunk;
    struct memusage *kup;
    /* zero size, return RT_NULL */
    if (size == 0)
        return RT_NULL;
    /*
     * Handle large allocations directly.  There should not be very many of
     * these so performance is not a big issue.
     */
    if (size >= zone_limit)
    {
        size = RT_ALIGN(size, RT_MM_PAGE_SIZE);
        chunk = rt_page_alloc(size >> RT_MM_PAGE_BITS);
        if (chunk == RT_NULL)
            return RT_NULL;
        /* set kup */
        kup = btokup(chunk);
        kup->type = PAGE_TYPE_LARGE;
        kup->size = size >> RT_MM_PAGE_BITS;

        RT_DEBUG_LOG(RT_DEBUG_SLAB,
                     ("malloc a large memory 0x%x, page cnt %d, kup %d\n",
                      size,
                      size >> RT_MM_PAGE_BITS,
                      ((rt_uint32_t)chunk - heap_start) >> RT_MM_PAGE_BITS));
        /* lock heap */
        rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

#ifdef RT_MEM_STATS
        used_mem += size;
        if (used_mem > max_mem)
            max_mem = used_mem;
#endif
        goto done;
    }

    /* lock heap */
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

    /*
     * Attempt to allocate out of an existing zone.  First try the free list,
     * then allocate out of unallocated space.  If we find a good zone move
     * it to the head of the list so later allocations find it quickly
     * (we might have thousands of zones in the list).
     *
     * Note: zoneindex() will report an error if size is too large.
     */
    zi = zoneindex(&size);
    RT_ASSERT(zi < NZONES);

    RT_DEBUG_LOG(RT_DEBUG_SLAB, ("try to malloc 0x%x on zone: %d\n", size, zi));

    if ((z = zone_array[zi]) != RT_NULL)
    {
        RT_ASSERT(z->z_nfree > 0);

        /* Remove us from the zone_array[] when we become empty */
        if (--z->z_nfree == 0)
        {
            zone_array[zi] = z->z_next;
            z->z_next = RT_NULL;
        }

        /*
         * No chunks are available but nfree said we had some memory, so
         * it must be available in the never-before-used-memory area
         * governed by uindex.  The consequences are very serious if our zone
         * got corrupted so we use an explicit rt_kprintf rather than a KASSERT.
         */
        if (z->z_uindex + 1 != z->z_nmax)
        {
            z->z_uindex = z->z_uindex + 1;
            chunk = (slab_chunk *)(z->z_baseptr + z->z_uindex * size);
        }
        else
        {
            /* find on free chunk list */
            chunk = z->z_freechunk;

            /* remove this chunk from list */
            z->z_freechunk = z->z_freechunk->c_next;
        }

#ifdef RT_MEM_STATS
        used_mem += z->z_chunksize;
        if (used_mem > max_mem)
            max_mem = used_mem;
#endif

        goto done;
    }

    /*
     * If all zones are exhausted we need to allocate a new zone for this
     * index.
     *
     * At least one subsystem, the tty code (see CROUND) expects power-of-2
     * allocations to be power-of-2 aligned.  We maintain compatibility by
     * adjusting the base offset below.
     */
    {
        rt_int32_t off;

        if ((z = zone_free) != RT_NULL)
        {
            /* remove zone from free zone list */
            zone_free = z->z_next;
            -- zone_free_cnt;
        }
        else
        {
            /* unlock heap, since page allocator will think about lock */
            rt_sem_release(&heap_sem);

            /* allocate a zone from page */
            z = rt_page_alloc(zone_size / RT_MM_PAGE_SIZE);
            if (z == RT_NULL)
            {
                chunk = RT_NULL;
                goto __exit;
            }

            /* lock heap */
            rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

            RT_DEBUG_LOG(RT_DEBUG_SLAB, ("alloc a new zone: 0x%x\n",
                                         (rt_uint32_t)z));

            /* set message usage */
            for (off = 0, kup = btokup(z); off < zone_page_cnt; off ++)
            {
                kup->type = PAGE_TYPE_SMALL;
                kup->size = off;

                kup ++;
            }
        }

        /* clear to zero */
        rt_memset(z, 0, sizeof(slab_zone));

        /* offset of slab zone struct in zone */
        off = sizeof(slab_zone);

        /*
         * Guarantee power-of-2 alignment for power-of-2-sized chunks.
         * Otherwise just 8-byte align the data.
         */
        if ((size | (size - 1)) + 1 == (size << 1))
            off = (off + size - 1) & ~(size - 1);
        else
            off = (off + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;

        z->z_magic     = ZALLOC_SLAB_MAGIC;
        z->z_zoneindex = zi;
        z->z_nmax      = (zone_size - off) / size;
        z->z_nfree     = z->z_nmax - 1;
        z->z_baseptr   = (rt_uint8_t *)z + off;
        z->z_uindex    = 0;
        z->z_chunksize = size;

        chunk = (slab_chunk *)(z->z_baseptr + z->z_uindex * size);

        /* link to zone array */
        z->z_next = zone_array[zi];
        zone_array[zi] = z;

#ifdef RT_MEM_STATS
        used_mem += z->z_chunksize;
        if (used_mem > max_mem)
            max_mem = used_mem;
#endif
    }

done:
    rt_sem_release(&heap_sem);
    RT_OBJECT_HOOK_CALL(rt_malloc_hook, ((char *)chunk, size));

__exit:
    return chunk;
}
RTM_EXPORT(rt_malloc);
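
/*
 * Usage sketch (illustrative): requests below zone_limit are served from
 * a slab zone, larger ones go straight to the page allocator; the caller
 * sees the same interface either way.
 *
 *     char *buf = rt_malloc(100);            served from a slab zone
 *     void *big = rt_malloc(64 * 1024);      served as whole pages
 *
 *     rt_free(big);
 *     rt_free(buf);
 */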

/**
 * This function will change the size of previously allocated memory block.
 *
 * @param ptr the previously allocated memory block
 * @param size the new size of memory block
 *
 * @return the allocated memory
 */
void *rt_realloc(void *ptr, rt_size_t size)
{
    void *nptr;
    slab_zone *z;
    struct memusage *kup;
    if (ptr == RT_NULL)
        return rt_malloc(size);
    if (size == 0)
    {
        rt_free(ptr);
        return RT_NULL;
    }
    /*
     * Get the original allocation's zone.  If the new request winds up
     * using the same chunk size we do not have to do anything.
     */
    kup = btokup((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK);
    if (kup->type == PAGE_TYPE_LARGE)
    {
        rt_size_t osize;

        osize = kup->size << RT_MM_PAGE_BITS;
        if ((nptr = rt_malloc(size)) == RT_NULL)
            return RT_NULL;
        rt_memcpy(nptr, ptr, size > osize ? osize : size);
        rt_free(ptr);

        return nptr;
    }
    else if (kup->type == PAGE_TYPE_SMALL)
    {
        z = (slab_zone *)(((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK) -
                          kup->size * RT_MM_PAGE_SIZE);
        RT_ASSERT(z->z_magic == ZALLOC_SLAB_MAGIC);

        zoneindex(&size);
        if (z->z_chunksize == size)
            return (ptr); /* same chunk */

        /*
         * Allocate memory for the new request size.  Note that zoneindex has
         * already adjusted the request size to the appropriate chunk size, which
         * should optimize our bcopy().  Then copy and return the new pointer.
         */
        if ((nptr = rt_malloc(size)) == RT_NULL)
            return RT_NULL;

        rt_memcpy(nptr, ptr, size > z->z_chunksize ? z->z_chunksize : size);
        rt_free(ptr);

        return nptr;
    }

    return RT_NULL;
}
RTM_EXPORT(rt_realloc);
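
/*
 * Usage sketch (illustrative): growing a buffer. When the rounded-up
 * chunk size is unchanged, rt_realloc() returns the original pointer
 * without copying; otherwise the data moves to a new block and the old
 * one is freed.
 *
 *     char *p = rt_malloc(100);      occupies a 104-byte chunk
 *     p = rt_realloc(p, 104);        same chunk, no copy
 *     p = rt_realloc(p, 200);        new 208-byte chunk, data copied
 *     rt_free(p);
 */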

/**
 * This function will contiguously allocate enough space for count objects
 * that are size bytes of memory each and returns a pointer to the allocated
 * memory.
 *
 * The allocated memory is filled with bytes of value zero.
 *
 * @param count number of objects to allocate
 * @param size size of the objects to allocate
 *
 * @return pointer to allocated memory / NULL pointer if there is an error
 */
void *rt_calloc(rt_size_t count, rt_size_t size)
{
    void *p;

    /* allocate 'count' objects of size 'size' */
    p = rt_malloc(count * size);
    /* zero the memory */
    if (p)
        rt_memset(p, 0, count * size);

    return p;
}
RTM_EXPORT(rt_calloc);
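
/*
 * Usage sketch (illustrative): allocate a zero-filled array of ten
 * elements. Editorial caution: count * size is computed without an
 * overflow check, so callers must ensure the product fits in rt_size_t.
 *
 *     rt_uint32_t *table = rt_calloc(10, sizeof(rt_uint32_t));
 *     if (table == RT_NULL)
 *     {
 *         handle the allocation failure ...
 *     }
 */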

/**
 * This function will release a memory block previously allocated by rt_malloc.
 * The released memory block is returned to the system heap.
 *
 * @param ptr the address of memory which will be released
 */
void rt_free(void *ptr)
{
    slab_zone *z;
    slab_chunk *chunk;
    struct memusage *kup;
    /* free a RT_NULL pointer */
    if (ptr == RT_NULL)
        return;

    RT_OBJECT_HOOK_CALL(rt_free_hook, (ptr));

    /* get memory usage */
#if RT_DEBUG_SLAB
    {
        rt_uint32_t addr = ((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK);
        RT_DEBUG_LOG(RT_DEBUG_SLAB,
                     ("free a memory 0x%x and align to 0x%x, kup index %d\n",
                      (rt_uint32_t)ptr,
                      (rt_uint32_t)addr,
                      ((rt_uint32_t)(addr) - heap_start) >> RT_MM_PAGE_BITS));
    }
#endif

    kup = btokup((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK);
    /* release large allocation */
    if (kup->type == PAGE_TYPE_LARGE)
    {
        rt_uint32_t size;
        /* lock heap */
        rt_sem_take(&heap_sem, RT_WAITING_FOREVER);
        /* clear page counter */
        size = kup->size;
        kup->size = 0;

#ifdef RT_MEM_STATS
        used_mem -= size * RT_MM_PAGE_SIZE;
#endif
        rt_sem_release(&heap_sem);

        RT_DEBUG_LOG(RT_DEBUG_SLAB,
                     ("free large memory block 0x%x, page count %d\n",
                      (rt_uint32_t)ptr, size));
        /* free this page */
        rt_page_free(ptr, size);
        return;
    }
    /* lock heap */
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);
    /* zone case. get out zone. */
    z = (slab_zone *)(((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK) -
                      kup->size * RT_MM_PAGE_SIZE);
    RT_ASSERT(z->z_magic == ZALLOC_SLAB_MAGIC);
    chunk          = (slab_chunk *)ptr;
    chunk->c_next  = z->z_freechunk;
    z->z_freechunk = chunk;

#ifdef RT_MEM_STATS
    used_mem -= z->z_chunksize;
#endif

    /*
     * Bump the number of free chunks.  If it becomes non-zero the zone
     * must be added back onto the appropriate list.
     */
    if (z->z_nfree++ == 0)
    {
        z->z_next = zone_array[z->z_zoneindex];
        zone_array[z->z_zoneindex] = z;
    }

    /*
     * If the zone becomes totally free, and there are other zones we
     * can allocate from, move this zone to the FreeZones list.  Since
     * this code can be called from an IPI callback, do *NOT* try to mess
     * with kernel_map here.  Hysteresis will be performed at malloc() time.
     */
    if (z->z_nfree == z->z_nmax &&
        (z->z_next || zone_array[z->z_zoneindex] != z))
    {
        slab_zone **pz;

        RT_DEBUG_LOG(RT_DEBUG_SLAB, ("free zone 0x%x, zone index %d\n",
                                     (rt_uint32_t)z, z->z_zoneindex));

        /* remove zone from zone array list */
        for (pz = &zone_array[z->z_zoneindex]; z != *pz; pz = &(*pz)->z_next)
            ;
        *pz = z->z_next;
        /* reset zone */
        z->z_magic = -1;
        /* insert to free zone list */
        z->z_next = zone_free;
        zone_free = z;

        ++ zone_free_cnt;

        /* release zone to page allocator */
        if (zone_free_cnt > ZONE_RELEASE_THRESH)
        {
            register rt_base_t i;
            z         = zone_free;
            zone_free = z->z_next;
            -- zone_free_cnt;
            /* set message usage */
            for (i = 0, kup = btokup(z); i < zone_page_cnt; i ++)
            {
                kup->type = PAGE_TYPE_FREE;
                kup->size = 0;
                kup ++;
            }
            /* unlock heap */
            rt_sem_release(&heap_sem);
            /* release pages */
            rt_page_free(z, zone_size / RT_MM_PAGE_SIZE);
            return;
        }
    }
    /* unlock heap */
    rt_sem_release(&heap_sem);
}
RTM_EXPORT(rt_free);
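
/*
 * Editorial note: rt_free() needs no size argument because the per-page
 * memusage entry records how each block was obtained, for example:
 *
 *     void *small = rt_malloc(64);           PAGE_TYPE_SMALL, slab path
 *     void *large = rt_malloc(32 * 1024);    PAGE_TYPE_LARGE, page path
 *     rt_free(small);                        chunk returns to its zone
 *     rt_free(large);                        pages return to the free list
 */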

#ifdef RT_MEM_STATS
void rt_memory_info(rt_uint32_t *total,
                    rt_uint32_t *used,
                    rt_uint32_t *max_used)
{
    if (total != RT_NULL)
        *total = heap_end - heap_start;
    if (used  != RT_NULL)
        *used = used_mem;
    if (max_used != RT_NULL)
        *max_used = max_mem;
}
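
/*
 * Usage sketch (illustrative): query the heap statistics; any argument
 * may be RT_NULL when that value is not needed.
 *
 *     rt_uint32_t total, used, max_used;
 *     rt_memory_info(&total, &used, &max_used);
 *     rt_kprintf("heap: %d/%d bytes used, peak %d\n", used, total, max_used);
 */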

#ifdef RT_USING_FINSH
#include <finsh.h>

void list_mem(void)
{
    rt_kprintf("total memory: %d\n", heap_end - heap_start);
    rt_kprintf("used memory : %d\n", used_mem);
    rt_kprintf("maximum allocated memory: %d\n", max_mem);
}
FINSH_FUNCTION_EXPORT(list_mem, list memory usage information)
#endif
#endif

/**@}*/

#endif