/*
 * A Remote Heap.  Remote means that we don't touch the memory that the
 * heap points to. Normal heap implementations use the memory they manage
 * to place their list. We cannot do that because the memory we manage may
 * have special properties, for example it is uncachable or of different
 * endianness.
 *
 * Author: Pantelis Antoniou <panto@intracom.gr>
 *
 * 2004 (c) INTRACOM S.A. Greece. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */
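
/*
 * Illustrative usage sketch (not part of the original file): the typical
 * life cycle of a dynamically created heap.  The region base, sizes and
 * owner string are made-up values for the example only.
 *
 *	rh_info_t *rh;
 *	unsigned long offset;
 *
 *	rh = rh_create(4);
 *	if (IS_ERR(rh))
 *		return PTR_ERR(rh);
 *
 *	rh_attach_region(rh, 0, 16384);
 *
 *	offset = rh_alloc(rh, 128, "example owner");
 *	if (IS_ERR_VALUE(offset))
 *		return -ENOMEM;
 *
 *	rh_free(rh, offset);
 *	rh_destroy(rh);
 */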
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/rheap.h>

/*
 * Fixup a list_head, needed when copying lists.  If the pointers fall
 * between s and e, apply the delta.  This assumes that
 * sizeof(struct list_head *) == sizeof(unsigned long *).
 */
static inline void fixup(unsigned long s, unsigned long e, int d,
			 struct list_head *l)
{
	unsigned long *pp;

	pp = (unsigned long *)&l->next;
	if (*pp >= s && *pp < e)
		*pp += d;

	pp = (unsigned long *)&l->prev;
	if (*pp >= s && *pp < e)
		*pp += d;
}

/* Grow the allocated blocks */
static int grow(rh_info_t * info, int max_blocks)
{
	rh_block_t *block, *blk;
	int i, new_blocks;
	int delta;
	unsigned long blks, blke;

	if (max_blocks <= info->max_blocks)
		return -EINVAL;

	new_blocks = max_blocks - info->max_blocks;

	block = kmalloc(sizeof(rh_block_t) * max_blocks, GFP_ATOMIC);
	if (block == NULL)
		return -ENOMEM;

	if (info->max_blocks > 0) {

		/* copy old block area */
		memcpy(block, info->block,
		       sizeof(rh_block_t) * info->max_blocks);

		delta = (char *)block - (char *)info->block;

		/* and fixup list pointers */
		blks = (unsigned long)info->block;
		blke = (unsigned long)(info->block + info->max_blocks);

		for (i = 0, blk = block; i < info->max_blocks; i++, blk++)
			fixup(blks, blke, delta, &blk->list);

		fixup(blks, blke, delta, &info->empty_list);
		fixup(blks, blke, delta, &info->free_list);
		fixup(blks, blke, delta, &info->taken_list);

		/* free the old allocated memory */
		if ((info->flags & RHIF_STATIC_BLOCK) == 0)
			kfree(info->block);
	}

	info->block = block;
	info->empty_slots += new_blocks;
	info->max_blocks = max_blocks;
	info->flags &= ~RHIF_STATIC_BLOCK;

	/* add all new block slots to the empty list */
	blk = block + info->max_blocks - new_blocks;
	for (i = 0; i < new_blocks; i++, blk++)
		list_add(&blk->list, &info->empty_list);

	return 0;
}

/*
 * Ensure that at least the required number of empty slots is available.  If
 * this function causes the block area to grow, then all pointers kept into
 * the block area become invalid!
 */
static int assure_empty(rh_info_t * info, int slots)
{
	int max_blocks;

	/* This function is not meant to be used to grow uncontrollably */
	if (slots >= 4)
		return -EINVAL;

	/* Enough space */
	if (info->empty_slots >= slots)
		return 0;

	/* Round up to the next multiple of 16 blocks */
	max_blocks = ((info->max_blocks + slots) + 15) & ~15;

	return grow(info, max_blocks);
}

static rh_block_t *get_slot(rh_info_t * info)
{
	rh_block_t *blk;

	/* No free slots are left, and we cannot extend here. */
	/* XXX: You should have called assure_empty() before */
	if (info->empty_slots == 0) {
		printk(KERN_ERR "rh: out of slots; crash is imminent.\n");
		return NULL;
	}

	/* Get empty slot to use */
	blk = list_entry(info->empty_list.next, rh_block_t, list);
	list_del_init(&blk->list);
	info->empty_slots--;

	/* Initialize */
	blk->start = 0;
	blk->size = 0;
	blk->owner = NULL;

	return blk;
}

static inline void release_slot(rh_info_t * info, rh_block_t * blk)
{
	list_add(&blk->list, &info->empty_list);
	info->empty_slots++;
}

static void attach_free_block(rh_info_t * info, rh_block_t * blkn)
{
	rh_block_t *blk;
	rh_block_t *before;
	rh_block_t *after;
	rh_block_t *next;
	int size;
	unsigned long s, e, bs, be;
	struct list_head *l;

	/* We assume that they are aligned properly */
	size = blkn->size;
	s = blkn->start;
	e = s + size;

	/* Find the blocks immediately before and after the given one
	 * (if any) */
	before = NULL;
	after = NULL;
	next = NULL;

	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);

		bs = blk->start;
		be = bs + blk->size;

		if (next == NULL && s >= bs)
			next = blk;

		if (be == s)
			before = blk;

		if (e == bs)
			after = blk;

		/* If both are not null, break now */
		if (before != NULL && after != NULL)
			break;
	}

	/* Now check if they are really adjacent */
	if (before && s != (before->start + before->size))
		before = NULL;

	if (after && e != after->start)
		after = NULL;

	/* No coalescing; list insert and return */
	if (before == NULL && after == NULL) {

		if (next != NULL)
			list_add(&blkn->list, &next->list);
		else
			list_add(&blkn->list, &info->free_list);

		return;
	}

	/* We don't need it anymore */
	release_slot(info, blkn);

	/* Grow the before block */
	if (before != NULL && after == NULL) {
		before->size += size;
		return;
	}

	/* Grow the after block backwards */
	if (before == NULL && after != NULL) {
		after->start -= size;
		after->size += size;
		return;
	}

	/* Grow the before block, and release the after block */
	before->size += size + after->size;
	list_del(&after->list);
	release_slot(info, after);
}

static void attach_taken_block(rh_info_t * info, rh_block_t * blkn)
{
	rh_block_t *blk;
	struct list_head *l;

	/* Keep the taken list ordered: insert before the first block that starts after the new one */
	list_for_each(l, &info->taken_list) {
		blk = list_entry(l, rh_block_t, list);
		if (blk->start > blkn->start) {
			list_add_tail(&blkn->list, &blk->list);
			return;
		}
	}

	list_add_tail(&blkn->list, &info->taken_list);
}

/*
 * Create a remote heap dynamically.  Note that no memory for the blocks
 * is allocated; it will be allocated upon the first attach or allocation.
 */
rh_info_t *rh_create(unsigned int alignment)
{
	rh_info_t *info;

	/* Alignment must be a power of two */
	if ((alignment & (alignment - 1)) != 0)
		return ERR_PTR(-EINVAL);

	info = kmalloc(sizeof(*info), GFP_ATOMIC);
	if (info == NULL)
		return ERR_PTR(-ENOMEM);

	info->alignment = alignment;

	/* Initially everything is empty */
	info->block = NULL;
	info->max_blocks = 0;
	info->empty_slots = 0;
	info->flags = 0;

	INIT_LIST_HEAD(&info->empty_list);
	INIT_LIST_HEAD(&info->free_list);
	INIT_LIST_HEAD(&info->taken_list);

	return info;
}
EXPORT_SYMBOL_GPL(rh_create);

/*
 * Destroy a dynamically created remote heap.  Deallocate only if the areas
 * are not static
 */
void rh_destroy(rh_info_t * info)
{
	if ((info->flags & RHIF_STATIC_BLOCK) == 0 && info->block != NULL)
		kfree(info->block);

	if ((info->flags & RHIF_STATIC_INFO) == 0)
		kfree(info);
}
EXPORT_SYMBOL_GPL(rh_destroy);

/*
 * Initialize in place a remote heap info block.  This is needed to support
 * operation very early in the startup of the kernel, when it is not yet safe
 * to call kmalloc.
 */
void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks,
	     rh_block_t * block)
{
	int i;
	rh_block_t *blk;

	/* Alignment must be a power of two */
	if ((alignment & (alignment - 1)) != 0)
		return;

	info->alignment = alignment;

	/* Initially everything is empty */
	info->block = block;
	info->max_blocks = max_blocks;
	info->empty_slots = max_blocks;
	info->flags = RHIF_STATIC_INFO | RHIF_STATIC_BLOCK;

	INIT_LIST_HEAD(&info->empty_list);
	INIT_LIST_HEAD(&info->free_list);
	INIT_LIST_HEAD(&info->taken_list);

	/* Add all block slots to the empty list */
	for (i = 0, blk = block; i < max_blocks; i++, blk++)
		list_add(&blk->list, &info->empty_list);
}
EXPORT_SYMBOL_GPL(rh_init);
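
/*
 * Illustrative sketch (not part of the original file): early-boot setup with
 * a statically allocated info structure and slot array, so that no kmalloc
 * call is needed.  The names and sizes are assumptions for the example only.
 *
 *	static rh_info_t early_rh;
 *	static rh_block_t early_rh_blocks[16];
 *
 *	rh_init(&early_rh, 8, ARRAY_SIZE(early_rh_blocks), early_rh_blocks);
 *	rh_attach_region(&early_rh, 0, 0x2000);
 */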

/* Attach a free memory region; coalesces adjacent free regions */
int rh_attach_region(rh_info_t * info, unsigned long start, int size)
{
	rh_block_t *blk;
	unsigned long s, e, m;
	int r;

	/* The region must be aligned */
336
	s = start;
337 338 339 340 341 342 343 344 345
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	if (IS_ERR_VALUE(e) || (e < s))
		return -ERANGE;

	/* Take final values */
	start = s;
	size = e - s;

	/* Grow the blocks, if needed */
	r = assure_empty(info, 1);
	if (r < 0)
		return r;

	blk = get_slot(info);
	blk->start = start;
	blk->size = size;
	blk->owner = NULL;

	attach_free_block(info, blk);

	return 0;
}
EXPORT_SYMBOL_GPL(rh_attach_region);

/* Detach the given address range; splits a free block if needed. */
unsigned long rh_detach_region(rh_info_t * info, unsigned long start, int size)
{
	struct list_head *l;
	rh_block_t *blk, *newblk;
	unsigned long s, e, m, bs, be;

	/* Validate size */
	if (size <= 0)
		return (unsigned long) -EINVAL;

	/* The region must be aligned */
	s = start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	if (assure_empty(info, 1) < 0)
		return (unsigned long) -ENOMEM;

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		/* The range must lie entirely inside one free block */
		bs = blk->start;
		be = blk->start + blk->size;
		if (s >= bs && e <= be)
			break;
		blk = NULL;
	}

	if (blk == NULL)
		return (unsigned long) -ENOMEM;

	/* Perfect fit */
	if (bs == s && be == e) {
		/* Delete from free list, release slot */
		list_del(&blk->list);
		release_slot(info, blk);
		return s;
	}

	/* blk still in free list, with updated start and/or size */
	if (bs == s || be == e) {
		if (bs == s)
			blk->start += size;
		blk->size -= size;

	} else {
		/* The front free fragment */
		blk->size = s - bs;

		/* the back free fragment */
		newblk = get_slot(info);
		newblk->start = e;
		newblk->size = be - e;

		list_add(&newblk->list, &blk->list);
	}

	return s;
}
EXPORT_SYMBOL_GPL(rh_detach_region);

/* Allocate a block of memory at the specified alignment.  The value returned
 * is an offset into the buffer initialized by rh_init(), or a negative number
 * if there is an error.
 */
unsigned long rh_alloc_align(rh_info_t * info, int size, int alignment, const char *owner)
{
	struct list_head *l;
	rh_block_t *blk;
	rh_block_t *newblk;
	unsigned long start, sp_size;

	/* Validate size, and alignment must be power of two */
	if (size <= 0 || (alignment & (alignment - 1)) != 0)
		return (unsigned long) -EINVAL;

	/* Align to configured alignment */
	size = (size + (info->alignment - 1)) & ~(info->alignment - 1);

	if (assure_empty(info, 2) < 0)
		return (unsigned long) -ENOMEM;

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		if (size <= blk->size) {
			start = (blk->start + alignment - 1) & ~(alignment - 1);
			if (start + size <= blk->start + blk->size)
				break;
		}
		blk = NULL;
	}

	if (blk == NULL)
		return (unsigned long) -ENOMEM;

	/* Just fits */
	if (blk->size == size) {
		/* Move from free list to taken list */
		list_del(&blk->list);
		newblk = blk;
	} else {
		/* Fragment caused, split if needed */
		/* Create block for fragment in the beginning */
		sp_size = start - blk->start;
		if (sp_size) {
			rh_block_t *spblk;

			spblk = get_slot(info);
			spblk->start = blk->start;
			spblk->size = sp_size;
			/* add before the blk */
			list_add(&spblk->list, blk->list.prev);
		}
		newblk = get_slot(info);
		newblk->start = start;
		newblk->size = size;

		/* blk still in free list, with updated start and size
		 * for fragment in the end */
		blk->start = start + size;
		blk->size -= sp_size + size;
		/* No fragment in the end, remove blk */
		if (blk->size == 0) {
			list_del(&blk->list);
			release_slot(info, blk);
		}
	}

	newblk->owner = owner;
	attach_taken_block(info, newblk);

	return start;
}
EXPORT_SYMBOL_GPL(rh_alloc_align);

/* Allocate a block of memory at the default alignment.  The value returned is
 * an offset into the buffer initialized by rh_init(), or a negative number if
 * there is an error.
 */
unsigned long rh_alloc(rh_info_t * info, int size, const char *owner)
{
	return rh_alloc_align(info, size, info->alignment, owner);
}
EXPORT_SYMBOL_GPL(rh_alloc);

/* Allocate a block of memory at the given offset, rounded up to the default
 * alignment.  The value returned is an offset into the buffer initialized by
 * rh_init(), or a negative number if there is an error.
 */
unsigned long rh_alloc_fixed(rh_info_t * info, unsigned long start, int size, const char *owner)
{
	struct list_head *l;
	rh_block_t *blk, *newblk1, *newblk2;
	unsigned long s, e, m, bs = 0, be = 0;

	/* Validate size */
	if (size <= 0)
		return (unsigned long) -EINVAL;

	/* The region must be aligned */
	s = start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	if (assure_empty(info, 2) < 0)
		return (unsigned long) -ENOMEM;

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		/* The range must lie entirely inside one free block */
		bs = blk->start;
		be = blk->start + blk->size;
		if (s >= bs && e <= be)
			break;
		blk = NULL;
	}

	if (blk == NULL)
		return (unsigned long) -ENOMEM;

	/* Perfect fit */
	if (bs == s && be == e) {
		/* Move from free list to taken list */
		list_del(&blk->list);
		blk->owner = owner;

		start = blk->start;
		attach_taken_block(info, blk);

		return start;

	}

	/* blk still in free list, with updated start and/or size */
	if (bs == s || be == e) {
		if (bs == s)
			blk->start += size;
		blk->size -= size;

	} else {
		/* The front free fragment */
		blk->size = s - bs;

		/* The back free fragment */
		newblk2 = get_slot(info);
		newblk2->start = e;
		newblk2->size = be - e;

		list_add(&newblk2->list, &blk->list);
	}

	newblk1 = get_slot(info);
	newblk1->start = s;
	newblk1->size = e - s;
	newblk1->owner = owner;

	start = newblk1->start;
	attach_taken_block(info, newblk1);

	return start;
}
EXPORT_SYMBOL_GPL(rh_alloc_fixed);
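
/*
 * Illustrative sketch (not part of the original file): reserving a block at a
 * known, fixed offset, e.g. for hardware that expects its buffer at a set
 * location.  The offset, size and owner string are assumptions only; "rh" is
 * an rh_info_t pointer obtained from rh_create() or rh_init().
 *
 *	unsigned long off;
 *
 *	off = rh_alloc_fixed(rh, 0x100, 256, "fixed example");
 *	if (IS_ERR_VALUE(off))
 *		return -ENOMEM;
 */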

/* Deallocate the memory previously allocated by one of the rh_alloc functions.
 * The return value is the size of the deallocated block, or a negative number
 * if there is an error.
 */
int rh_free(rh_info_t * info, unsigned long start)
{
	rh_block_t *blk, *blk2;
	struct list_head *l;
	int size;

	/* Linear search for block */
	blk = NULL;
	list_for_each(l, &info->taken_list) {
		blk2 = list_entry(l, rh_block_t, list);
		if (start < blk2->start)
			break;
		blk = blk2;
	}

	if (blk == NULL || start > (blk->start + blk->size))
		return -EINVAL;

	/* Remove from taken list */
	list_del(&blk->list);

	/* Get size of freed block */
	size = blk->size;
	attach_free_block(info, blk);

	return size;
}
EXPORT_SYMBOL_GPL(rh_free);

int rh_get_stats(rh_info_t * info, int what, int max_stats, rh_stats_t * stats)
{
	rh_block_t *blk;
	struct list_head *l;
	struct list_head *h;
	int nr;

	switch (what) {

	case RHGS_FREE:
		h = &info->free_list;
		break;

	case RHGS_TAKEN:
		h = &info->taken_list;
		break;

	default:
		return -EINVAL;
	}

	/* Linear search for block */
	nr = 0;
	list_for_each(l, h) {
		blk = list_entry(l, rh_block_t, list);
		if (stats != NULL && nr < max_stats) {
			stats->start = blk->start;
			stats->size = blk->size;
			stats->owner = blk->owner;
			stats++;
		}
		nr++;
	}

	return nr;
}
EXPORT_SYMBOL_GPL(rh_get_stats);

int rh_set_owner(rh_info_t * info, unsigned long start, const char *owner)
{
	rh_block_t *blk, *blk2;
	struct list_head *l;
	int size;

	/* Linear search for block */
	blk = NULL;
	list_for_each(l, &info->taken_list) {
		blk2 = list_entry(l, rh_block_t, list);
		if (start < blk2->start)
			break;
		blk = blk2;
	}

	if (blk == NULL || start > (blk->start + blk->size))
		return -EINVAL;

	blk->owner = owner;
	size = blk->size;

	return size;
}
EXPORT_SYMBOL_GPL(rh_set_owner);

void rh_dump(rh_info_t * info)
{
	static rh_stats_t st[32];	/* XXX maximum 32 blocks */
	int maxnr;
	int i, nr;

	maxnr = ARRAY_SIZE(st);

	printk(KERN_INFO
	       "info @0x%p (%d slots empty / %d max)\n",
	       info, info->empty_slots, info->max_blocks);

	printk(KERN_INFO "  Free:\n");
	nr = rh_get_stats(info, RHGS_FREE, maxnr, st);
	if (nr > maxnr)
		nr = maxnr;
	for (i = 0; i < nr; i++)
		printk(KERN_INFO
		       "    0x%lx-0x%lx (%u)\n",
		       st[i].start, st[i].start + st[i].size,
		       st[i].size);
	printk(KERN_INFO "\n");

	printk(KERN_INFO "  Taken:\n");
	nr = rh_get_stats(info, RHGS_TAKEN, maxnr, st);
	if (nr > maxnr)
		nr = maxnr;
	for (i = 0; i < nr; i++)
		printk(KERN_INFO
		       "    0x%lx-0x%lx (%u) %s\n",
		       st[i].start, st[i].start + st[i].size,
		       st[i].size, st[i].owner != NULL ? st[i].owner : "");
	printk(KERN_INFO "\n");
}
EXPORT_SYMBOL_GPL(rh_dump);

void rh_dump_blk(rh_info_t * info, rh_block_t * blk)
{
	printk(KERN_INFO
	       "blk @0x%p: 0x%lx-0x%lx (%u)\n",
	       blk, blk->start, blk->start + blk->size, blk->size);
}
EXPORT_SYMBOL_GPL(rh_dump_blk);