/*
 * File      : memheap.c
 * This file is part of RT-Thread RTOS
 * COPYRIGHT (C) 2012, RT-Thread Development Team
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rt-thread.org/license/LICENSE
 *
 * Change Logs:
 * Date           Author       Notes
 * 2012-04-10     Bernard      first implementation
 * 2012-10-16     Bernard      add the mutex lock for heap object.
 */

#include <rtthread.h>

#ifdef RT_USING_MEMHEAP

/* dynamic pool magic and mask */
#define RT_MEMHEAP_MAGIC		0x1ea01ea0
#define RT_MEMHEAP_MASK		0xfffffffe
#define RT_MEMHEAP_USED		0x01
#define RT_MEMHEAP_FREED		0x00

#define RT_MEMHEAP_IS_USED(i)	((i)->magic & RT_MEMHEAP_USED)
#define RT_MEMHEAP_MINIALLOC	12

#define RT_MEMHEAP_SIZE		RT_ALIGN(sizeof(struct rt_memheap_item), RT_ALIGN_SIZE)

/*
 * The initialized memory pool will be:
 * +-----------------------------------+--------------------------+
 * | whole freed memory block          | Used Memory Block Tailer |
 * +-----------------------------------+--------------------------+
 *
 * block_list --> whole freed memory block
 *
 * The length of the Used Memory Block Tailer is 0, which prevents block
 * merging across the end of the block list.
 */
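
/*
 * Worked example of the initial layout (illustrative figures, assuming a
 * 32-bit target where sizeof(struct rt_memheap_item) == 24 and
 * RT_ALIGN_SIZE == 4, so RT_MEMHEAP_SIZE == 24): for a 4096-byte pool,
 *
 *   available_size = 4096 - 2 * 24 = 4048
 *
 * The free block header occupies bytes [0, 24) of the pool, its data area
 * bytes [24, 4072), and the zero-length tailer header bytes [4072, 4096).
 */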
rt_err_t rt_memheap_init(struct rt_memheap *memheap, const char *name,
	void *start_addr,
	rt_uint32_t size)
{
	struct rt_memheap_item *item;

	RT_ASSERT(memheap != RT_NULL);

	/* initialize pool object */
	rt_object_init(&(memheap->parent), RT_Object_Class_MemHeap, name);

	memheap->start_addr = start_addr;
	memheap->pool_size = RT_ALIGN_DOWN(size, RT_ALIGN_SIZE);
	memheap->available_size = memheap->pool_size - (2 * RT_MEMHEAP_SIZE);

	/* initialize the free list header */
	item = &(memheap->free_header);
	item->magic = RT_MEMHEAP_MAGIC;
	item->pool_ptr = memheap;
	item->next = RT_NULL;
	item->prev = RT_NULL;
	item->next_free = item;
	item->prev_free = item;

	/* set the free list to free list header */
	memheap->free_list = item;

	/* initialize the first big memory block */
	item = (struct rt_memheap_item *)start_addr;
	item->magic = RT_MEMHEAP_MAGIC;
	item->pool_ptr = memheap;
	item->next = RT_NULL;
	item->prev = RT_NULL;
	item->next_free = item;
	item->prev_free = item;

	item->next = (struct rt_memheap_item *)
		((rt_uint8_t *)item + memheap->available_size + RT_MEMHEAP_SIZE);
	item->prev =  item->next;

	/* block list header */
	memheap->block_list = item;

	/* place the big memory block to free list */
	item->next_free = memheap->free_list->next_free;
	item->prev_free = memheap->free_list;
	memheap->free_list->next_free->prev_free = item;
	memheap->free_list->next_free = item;

	/* move to the end of the memory pool to build a small tailer block, which prevents block merging */
	item =  item->next;
	/* it's a used memory block */
	item->magic = RT_MEMHEAP_MAGIC | RT_MEMHEAP_USED;
	item->pool_ptr = memheap;
	item->next = (struct rt_memheap_item *)start_addr;
	item->prev = (struct rt_memheap_item *)start_addr;
	/* not in free list */
	item->next_free = item->prev_free = RT_NULL;

	/* initialize mutex lock */
	rt_mutex_init(&(memheap->lock), name, RT_IPC_FLAG_FIFO);

	RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                 ("memory heap: start addr 0x%08x, size %d, free list header 0x%08x",
                  start_addr, size, &(memheap->free_header)));

	return RT_EOK;
}
RTM_EXPORT(rt_memheap_init);
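
/*
 * Usage sketch (illustrative only, not built): create a memheap over a
 * static buffer.  The object name "demo", the buffer "demo_pool" and its
 * size are assumptions made for this example; rtthread.h is already
 * included at the top of this file.
 */
#if 0
static struct rt_memheap demo_heap;
static rt_uint8_t demo_pool[4096];

static void demo_memheap_init(void)
{
	/* hand the whole buffer to the heap object; the heap keeps its
	 * block headers inside this buffer */
	rt_memheap_init(&demo_heap, "demo", (void *)demo_pool, sizeof(demo_pool));
}
#endif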

rt_err_t rt_memheap_detach(struct rt_memheap *heap)
{
	RT_ASSERT(heap);

	rt_object_detach(&(heap->lock.parent.parent));
	rt_object_detach(&(heap->parent));

	/* Return a successful completion. */
	return RT_EOK;
}
RTM_EXPORT(rt_memheap_detach);

void *rt_memheap_alloc(struct rt_memheap *heap, rt_uint32_t size)
{
	rt_err_t result;
	rt_uint32_t free_size;
	struct rt_memheap_item *header_ptr;

	RT_ASSERT(heap != RT_NULL);

	/* align allocated size */
	size = RT_ALIGN(size, RT_ALIGN_SIZE);
	if (size < RT_MEMHEAP_MINIALLOC)
		size = RT_MEMHEAP_MINIALLOC;

	RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("allocate %d", size));

	if (size < heap->available_size)
	{
		/* search on free list */
		free_size = 0;

		/* lock memheap */
		result = rt_mutex_take(&(heap->lock), RT_WAITING_FOREVER);
		if (result != RT_EOK)
		{
			rt_set_errno(result);
			return RT_NULL;
		}

		/* get the first free memory block */
		header_ptr = heap->free_list->next_free;
		while (header_ptr != heap->free_list && free_size < size)
		{
			/* get current freed memory block size */
			free_size = (rt_uint32_t)(header_ptr->next) - (rt_uint32_t)header_ptr - RT_MEMHEAP_SIZE;

			if (free_size < size)
			{
				/* move to next free memory block */
				header_ptr = header_ptr->next_free;
			}
		}

		/* determine if the memory is available. */
		if (free_size >= size)
		{
			/* a block that satisfies the request has been found. */

			/* determine if the block needs to be split. */
			if (free_size >= (size + RT_MEMHEAP_SIZE + RT_MEMHEAP_MINIALLOC))
			{
				struct rt_memheap_item *new_ptr;

				/* split the block. */
				new_ptr =  (struct rt_memheap_item *)(((rt_uint8_t *)header_ptr) + size + RT_MEMHEAP_SIZE);

				RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                             ("split: h[0x%08x] nm[0x%08x] pm[0x%08x] to n[0x%08x]",
                              header_ptr,
                              header_ptr->next,
                              header_ptr->prev,
                              new_ptr));

				/* mark the new block as a memory block and freed. */
				new_ptr->magic = RT_MEMHEAP_MAGIC;

				/* put the pool pointer into the new block. */
				new_ptr->pool_ptr = heap;

				/* break down the block list */
				new_ptr->prev = header_ptr;
				new_ptr->next = header_ptr->next;
				header_ptr->next->prev = new_ptr;
				header_ptr->next = new_ptr;

				/* remove header ptr from free list */
				header_ptr->next_free->prev_free = header_ptr->prev_free;
				header_ptr->prev_free->next_free = header_ptr->next_free;
				header_ptr->next_free = RT_NULL;
				header_ptr->prev_free = RT_NULL;

				/* insert new_ptr to free list */
				new_ptr->next_free = heap->free_list->next_free;
				new_ptr->prev_free = heap->free_list;
				heap->free_list->next_free->prev_free = new_ptr;
				heap->free_list->next_free = new_ptr;
				RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("new ptr: nf 0x%08x, pf 0x%08x",
                                                new_ptr->next_free,
                                                new_ptr->prev_free));

				/* decrement the available byte count.  */
				heap->available_size = heap->available_size - size - RT_MEMHEAP_SIZE;
			}
			else
			{
				/* decrement the entire free size from the available byte count.  */
				heap->available_size = heap->available_size - free_size;

				/* remove header_ptr from free list */
				RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                             ("one block: h[0x%08x], nf 0x%08x, pf 0x%08x",
                              header_ptr,
                              header_ptr->next_free,
                              header_ptr->prev_free));

				header_ptr->next_free->prev_free = header_ptr->prev_free;
				header_ptr->prev_free->next_free = header_ptr->next_free;
				header_ptr->next_free = RT_NULL;
				header_ptr->prev_free = RT_NULL;
			}

			/* release lock */
			rt_mutex_release(&(heap->lock));
			
			/* Mark the allocated block as not available.  */
237
			header_ptr->magic |= RT_MEMHEAP_USED;

			/* Return a memory address to the caller.  */
			RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                         ("am: m[0x%08x], h[0x%08x], size: %d",
                          (void *)((rt_uint8_t *)header_ptr + RT_MEMHEAP_SIZE),
                          header_ptr,
                          size));

			return (void *)((rt_uint8_t *)header_ptr + RT_MEMHEAP_SIZE);
		}

		/* release lock */
		rt_mutex_release(&(heap->lock));
	}

	RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("allocate memory: failed\n"));

	/* Return the completion status.  */
	return RT_NULL;
}
RTM_EXPORT(rt_memheap_alloc);
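
/*
 * Usage sketch (illustrative only, not built): allocate from an initialized
 * memheap and check for failure.  "demo_heap" continues the hypothetical
 * example above; the request size of 128 bytes is arbitrary.
 */
#if 0
static void *demo_memheap_alloc(void)
{
	void *buf;

	/* returns RT_NULL when no free block is large enough */
	buf = rt_memheap_alloc(&demo_heap, 128);
	if (buf == RT_NULL)
	{
		rt_kprintf("memheap: allocation of 128 bytes failed\n");
	}

	return buf;
}
#endif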

void rt_memheap_free(void *ptr)
{
	rt_err_t result;
	struct rt_memheap *heap;
	struct rt_memheap_item *header_ptr, *new_ptr;
	rt_uint32_t insert_header;

	/* set initial status as OK */
	insert_header = 1;
	new_ptr = RT_NULL;
	header_ptr = (struct rt_memheap_item *)((rt_uint8_t *)ptr - RT_MEMHEAP_SIZE);

	RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("free memory: m[0x%08x], h[0x%08x]",
                                    ptr, header_ptr));

	/* check magic */
	RT_ASSERT((header_ptr->magic & RT_MEMHEAP_MASK) == RT_MEMHEAP_MAGIC);

	/* get pool ptr */
	heap = header_ptr->pool_ptr;

	/* lock memheap */
	result = rt_mutex_take(&(heap->lock), RT_WAITING_FOREVER);
	if (result != RT_EOK)
	{
		rt_set_errno(result);
		return ;
	}

	/* Mark the memory as available. */
	header_ptr->magic &= ~RT_MEMHEAP_USED;

	/* Adjust the available number of bytes. */
	heap->available_size =  heap->available_size +
			((rt_uint32_t)(header_ptr->next) -
			(rt_uint32_t)header_ptr) - RT_MEMHEAP_SIZE;

	/* Determine if the block can be merged with the previous neighbor. */
	if (!RT_MEMHEAP_IS_USED(header_ptr->prev))
	{
		RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("merge: left node 0x%08x",
                                        header_ptr->prev));

		/* adjust the available number of bytes. */
		heap->available_size = heap->available_size + RT_MEMHEAP_SIZE;

		/* yes, merge block with previous neighbor. */
		(header_ptr->prev)->next = header_ptr->next;
		(header_ptr->next)->prev = header_ptr->prev;

		/* move header pointer to previous. */
		header_ptr = header_ptr->prev;
		/* don't insert header to free list */
		insert_header = 0;
	}

	/* determine if the block can be merged with the next neighbor. */
	if (!RT_MEMHEAP_IS_USED(header_ptr->next))
	{
		/* adjust the available number of bytes. */
		heap->available_size =  heap->available_size + RT_MEMHEAP_SIZE;

		/* merge block with next neighbor. */
		new_ptr =  header_ptr->next;

		RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                     ("merge: right node 0x%08x, nf 0x%08x, pf 0x%08x",
                      new_ptr, new_ptr->next_free, new_ptr->prev_free));

		new_ptr->next->prev = header_ptr;
		header_ptr->next = new_ptr->next;

		/* remove new ptr from free list */
		new_ptr->next_free->prev_free = new_ptr->prev_free;
		new_ptr->prev_free->next_free = new_ptr->next_free;
	}

	if (insert_header)
	{
		/* no left merge, insert to free list */
		header_ptr->next_free = heap->free_list->next_free;
		header_ptr->prev_free = heap->free_list;
		heap->free_list->next_free->prev_free = header_ptr;
		heap->free_list->next_free = header_ptr;

		RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                     ("insert to free list: nf 0x%08x, pf 0x%08x",
                      header_ptr->next_free, header_ptr->prev_free));
	}

	/* release lock */
	rt_mutex_release(&(heap->lock));
}
RTM_EXPORT(rt_memheap_free);
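
/*
 * Usage sketch (illustrative only, not built): release a block obtained from
 * rt_memheap_alloc().  No heap argument is needed because the block header
 * records its owning pool in pool_ptr; available_size grows back by the block
 * size plus any headers reclaimed by merging with free neighbors.  "demo_heap"
 * and "buf" continue the hypothetical example above.
 */
#if 0
static void demo_memheap_release(void *buf)
{
	rt_uint32_t before = demo_heap.available_size;

	/* this version of rt_memheap_free() does not check for RT_NULL,
	 * so guard the call in the caller */
	if (buf == RT_NULL)
		return;

	rt_memheap_free(buf);

	RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("available size: %d -> %d\n",
                                    before, demo_heap.available_size));
}
#endif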

#endif