Unverified commit 78db77f6, authored by Bernard Xiong, committed by GitHub

Merge pull request #2481 from lymzzyh/slab

fix slab allocator on 64-bit CPUs
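The patch replaces casts through rt_uint32_t with rt_ubase_t throughout the SLAB allocator. On a 64-bit CPU a pointer is wider than 32 bits, so forcing it through a 32-bit integer silently drops the upper address bits; rt_ubase_t is RT-Thread's base unsigned integer type and is assumed here to be pointer-width on 64-bit builds. A standalone sketch of the truncation (illustrative only, not part of the diff):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: on an LP64 target a pointer is 64 bits wide.
 * Squeezing it through a 32-bit integer (the old rt_uint32_t casts)
 * loses the high bits; a pointer-width type (what rt_ubase_t is
 * assumed to map to on 64-bit builds) keeps the full address. */
int main(void)
{
    void *p = (void *)(uintptr_t)0x140003000ULL;   /* address above 4 GB */

    uint32_t  truncated = (uint32_t)(uintptr_t)p;  /* high bits dropped  */
    uintptr_t intact    = (uintptr_t)p;            /* full 64-bit value  */

    printf("truncated: 0x%x\n", (unsigned)truncated);      /* 0x40003000  */
    printf("intact   : 0x%lx\n", (unsigned long)intact);   /* 0x140003000 */
    return 0;
}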
@@ -217,9 +217,9 @@ struct memusage
 };
 static struct memusage *memusage = RT_NULL;
 #define btokup(addr) \
-    (&memusage[((rt_uint32_t)(addr) - heap_start) >> RT_MM_PAGE_BITS])
+    (&memusage[((rt_ubase_t)(addr) - heap_start) >> RT_MM_PAGE_BITS])

-static rt_uint32_t heap_start, heap_end;
+static rt_ubase_t heap_start, heap_end;

 /* page allocator */
 struct rt_page_head
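btokup() locates the memusage entry for a block by subtracting heap_start from the block address and shifting by RT_MM_PAGE_BITS, i.e. it turns a byte offset into a page index; with heap_start now an rt_ubase_t the subtraction is done at pointer width. A worked sketch of that index computation (made-up addresses, assuming 4 KB pages):

#include <stdint.h>
#include <stdio.h>

#define MM_PAGE_BITS 12u   /* assumption: 4 KB pages, like RT_MM_PAGE_BITS */

int main(void)
{
    uint64_t heap_start = 0x100000000ULL;  /* heap base above 4 GB (made up) */
    uint64_t addr       = 0x100005000ULL;  /* some block inside the heap     */

    /* same arithmetic as btokup(): byte offset from the heap base,
     * shifted down to a page index into the memusage[] array */
    uint64_t idx = (addr - heap_start) >> MM_PAGE_BITS;   /* 0x5000 >> 12 == 5 */

    printf("memusage index: %llu\n", (unsigned long long)idx);
    return 0;
}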
@@ -275,7 +275,7 @@ void rt_page_free(void *addr, rt_size_t npages)
     struct rt_page_head **prev;

     RT_ASSERT(addr != RT_NULL);
-    RT_ASSERT((rt_uint32_t)addr % RT_MM_PAGE_SIZE == 0);
+    RT_ASSERT((rt_ubase_t)addr % RT_MM_PAGE_SIZE == 0);
     RT_ASSERT(npages != 0);

     n = (struct rt_page_head *)addr;
@@ -348,13 +348,13 @@ void rt_system_heap_init(void *begin_addr, void *end_addr)
     RT_DEBUG_NOT_IN_INTERRUPT;

     /* align begin and end addr to page */
-    heap_start = RT_ALIGN((rt_uint32_t)begin_addr, RT_MM_PAGE_SIZE);
-    heap_end   = RT_ALIGN_DOWN((rt_uint32_t)end_addr, RT_MM_PAGE_SIZE);
+    heap_start = RT_ALIGN((rt_ubase_t)begin_addr, RT_MM_PAGE_SIZE);
+    heap_end   = RT_ALIGN_DOWN((rt_ubase_t)end_addr, RT_MM_PAGE_SIZE);

     if (heap_start >= heap_end)
     {
         rt_kprintf("rt_system_heap_init, wrong address[0x%x - 0x%x]\n",
-                   (rt_uint32_t)begin_addr, (rt_uint32_t)end_addr);
+                   (rt_ubase_t)begin_addr, (rt_ubase_t)end_addr);
         return;
     }
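rt_system_heap_init() shrinks the region inward to whole pages: the begin address is rounded up and the end address rounded down, so a misaligned region never extends beyond what the caller provided, and the following heap_start >= heap_end check rejects regions that are too small. A simplified sketch of the two roundings (stand-in helpers, not the real RT_ALIGN macros):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL   /* assumption: 4 KB pages */

/* simplified stand-ins for RT_ALIGN / RT_ALIGN_DOWN (power-of-two sizes) */
static uint64_t align_up(uint64_t x, uint64_t a)   { return (x + a - 1) & ~(a - 1); }
static uint64_t align_down(uint64_t x, uint64_t a) { return x & ~(a - 1); }

int main(void)
{
    uint64_t begin = 0x100000123ULL, end = 0x100100FFFULL;   /* made-up region */

    uint64_t heap_start = align_up(begin, PAGE_SIZE);    /* 0x100001000 */
    uint64_t heap_end   = align_down(end, PAGE_SIZE);    /* 0x100100000 */

    printf("heap: [0x%llx, 0x%llx)\n",
           (unsigned long long)heap_start, (unsigned long long)heap_end);
    return 0;
}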
@@ -391,7 +391,7 @@ void rt_system_heap_init(void *begin_addr, void *end_addr)
     memusage = rt_page_alloc(limsize / RT_MM_PAGE_SIZE);

     RT_DEBUG_LOG(RT_DEBUG_SLAB, ("memusage 0x%x, size 0x%x\n",
-                                 (rt_uint32_t)memusage, limsize));
+                                 (rt_ubase_t)memusage, limsize));
 }

 /*
@@ -401,7 +401,7 @@ void rt_system_heap_init(void *begin_addr, void *end_addr)
 rt_inline int zoneindex(rt_size_t *bytes)
 {
     /* unsigned for shift opt */
-    rt_uint32_t n = (rt_uint32_t)(*bytes);
+    rt_ubase_t n = (rt_ubase_t)(*bytes);

     if (n < 128)
     {
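zoneindex() maps a request size to a zone (size-class) number and rounds *bytes up to that class's chunk size; keeping n unsigned and base-width lets the compiler reduce the divisions to shifts (the "unsigned for shift opt" comment) and keeps the arithmetic valid for large rt_size_t values on 64-bit builds. A toy size-class lookup to illustrate the idea (hypothetical bucket table, not RT-Thread's actual one):

#include <stddef.h>
#include <stdio.h>

/* Toy illustration only: round a request up to a bucket's chunk size and
 * return the bucket index. The bucket table is hypothetical. */
static int toy_zoneindex(size_t *bytes)
{
    static const size_t chunk[] = { 8, 16, 32, 64, 128 };
    size_t n = *bytes;                       /* unsigned, as in the real code */

    for (size_t i = 0; i < sizeof(chunk) / sizeof(chunk[0]); i++)
    {
        if (n <= chunk[i])
        {
            *bytes = chunk[i];               /* round request up to chunk size */
            return (int)i;
        }
    }
    return -1;                               /* too large for a small chunk */
}

int main(void)
{
    size_t req = 20;
    int zi = toy_zoneindex(&req);
    printf("zone %d, rounded size %zu\n", zi, req);   /* zone 2, size 32 */
    return 0;
}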
@@ -507,7 +507,7 @@ void *rt_malloc(rt_size_t size)
                      ("malloc a large memory 0x%x, page cnt %d, kup %d\n",
                       size,
                       size >> RT_MM_PAGE_BITS,
-                      ((rt_uint32_t)chunk - heap_start) >> RT_MM_PAGE_BITS));
+                      ((rt_ubase_t)chunk - heap_start) >> RT_MM_PAGE_BITS));

         /* lock heap */
         rt_sem_take(&heap_sem, RT_WAITING_FOREVER);
@@ -610,7 +610,7 @@ void *rt_malloc(rt_size_t size)
         rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

         RT_DEBUG_LOG(RT_DEBUG_SLAB, ("alloc a new zone: 0x%x\n",
-                                     (rt_uint32_t)z));
+                                     (rt_ubase_t)z));

         /* set message usage */
         for (off = 0, kup = btokup(z); off < zone_page_cnt; off ++)
@@ -694,7 +694,7 @@ void *rt_realloc(void *ptr, rt_size_t size)
      * Get the original allocation's zone. If the new request winds up
      * using the same chunk size we do not have to do anything.
      */
-    kup = btokup((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK);
+    kup = btokup((rt_ubase_t)ptr & ~RT_MM_PAGE_MASK);
     if (kup->type == PAGE_TYPE_LARGE)
     {
         rt_size_t osize;
@@ -709,7 +709,7 @@ void *rt_realloc(void *ptr, rt_size_t size)
     }
     else if (kup->type == PAGE_TYPE_SMALL)
    {
-        z = (slab_zone *)(((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK) -
+        z = (slab_zone *)(((rt_ubase_t)ptr & ~RT_MM_PAGE_MASK) -
                           kup->size * RT_MM_PAGE_SIZE);
         RT_ASSERT(z->z_magic == ZALLOC_SLAB_MAGIC);
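For a small allocation, rt_realloc() and rt_free() recover the owning slab_zone header by masking the pointer down to its page base and stepping back kup->size pages (kup->size holds that page's offset within its zone). With the cast changed to rt_ubase_t, the masked intermediate value no longer truncates a 64-bit address. A sketch of that arithmetic with made-up numbers:

#include <stdint.h>
#include <stdio.h>

#define MM_PAGE_SIZE 4096ULL
#define MM_PAGE_MASK (MM_PAGE_SIZE - 1)

int main(void)
{
    /* made-up layout: the chunk lives two pages into its zone */
    uint64_t ptr      = 0x100042A40ULL;   /* pointer handed out by rt_malloc  */
    uint64_t kup_size = 2;                /* pages back from this page to the zone */

    uint64_t page_base = ptr & ~MM_PAGE_MASK;                  /* 0x100042000 */
    uint64_t zone_addr = page_base - kup_size * MM_PAGE_SIZE;  /* 0x100040000 */

    printf("page base 0x%llx, zone header 0x%llx\n",
           (unsigned long long)page_base, (unsigned long long)zone_addr);
    return 0;
}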
@@ -783,20 +783,20 @@ void rt_free(void *ptr)
     /* get memory usage */
 #if RT_DEBUG_SLAB
     {
-        rt_uint32_t addr = ((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK);
+        rt_ubase_t addr = ((rt_ubase_t)ptr & ~RT_MM_PAGE_MASK);
         RT_DEBUG_LOG(RT_DEBUG_SLAB,
                      ("free a memory 0x%x and align to 0x%x, kup index %d\n",
-                      (rt_uint32_t)ptr,
-                      (rt_uint32_t)addr,
-                      ((rt_uint32_t)(addr) - heap_start) >> RT_MM_PAGE_BITS));
+                      (rt_ubase_t)ptr,
+                      (rt_ubase_t)addr,
+                      ((rt_ubase_t)(addr) - heap_start) >> RT_MM_PAGE_BITS));
     }
 #endif

-    kup = btokup((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK);
+    kup = btokup((rt_ubase_t)ptr & ~RT_MM_PAGE_MASK);
     /* release large allocation */
     if (kup->type == PAGE_TYPE_LARGE)
     {
-        rt_uint32_t size;
+        rt_ubase_t size;

         /* lock heap */
         rt_sem_take(&heap_sem, RT_WAITING_FOREVER);
@@ -811,7 +811,7 @@ void rt_free(void *ptr)
         RT_DEBUG_LOG(RT_DEBUG_SLAB,
                      ("free large memory block 0x%x, page count %d\n",
-                      (rt_uint32_t)ptr, size));
+                      (rt_ubase_t)ptr, size));

         /* free this page */
         rt_page_free(ptr, size);
@@ -823,7 +823,7 @@ void rt_free(void *ptr)
     rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

     /* zone case. get out zone. */
-    z = (slab_zone *)(((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK) -
+    z = (slab_zone *)(((rt_ubase_t)ptr & ~RT_MM_PAGE_MASK) -
                       kup->size * RT_MM_PAGE_SIZE);
     RT_ASSERT(z->z_magic == ZALLOC_SLAB_MAGIC);
@@ -857,7 +857,7 @@ void rt_free(void *ptr)
         slab_zone **pz;

         RT_DEBUG_LOG(RT_DEBUG_SLAB, ("free zone 0x%x\n",
-                                     (rt_uint32_t)z, z->z_zoneindex));
+                                     (rt_ubase_t)z, z->z_zoneindex));

         /* remove zone from zone array list */
         for (pz = &zone_array[z->z_zoneindex]; z != *pz; pz = &(*pz)->z_next)
...