/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if heavy fragmentation is expected.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
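
/*
 * A minimal lifecycle sketch (illustrative only: error handling is elided,
 * MY_VRAM_SIZE is a made-up placeholder, and drm_mm_get_block() is the
 * non-atomic wrapper from drm_mm.h around drm_mm_get_block_generic()):
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node *node;
 *
 *	drm_mm_init(&mm, 0, MY_VRAM_SIZE);
 *
 *	node = drm_mm_search_free(&mm, 4096, 0, 0);
 *	if (node)
 *		node = drm_mm_get_block(node, 4096, 0);
 *
 *	...
 *
 *	drm_mm_put_block(node);
 *	drm_mm_takedown(&mm);
 */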

#include "drmP.h"
45
#include "drm_mm.h"
46
#include <linux/slab.h>
47
#include <linux/seq_file.h>
48

#define MM_UNUSED_TARGET 4

static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kzalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kzalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, free_stack);
			list_del(&child->free_stack);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

/* drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * drm_mm:	memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->free_stack, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
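
/*
 * Example (a sketch, not lifted from a real caller): top up the node cache
 * before taking a driver lock that must not be held across a sleeping
 * allocation, so that the subsequent atomic allocation can be served from
 * mm->unused_nodes. dev_lock is a made-up driver lock and
 * drm_mm_get_block_atomic() is the atomic wrapper from drm_mm.h:
 *
 *	struct drm_mm_node *node;
 *
 *	if (drm_mm_pre_get(mm))
 *		return -ENOMEM;
 *
 *	spin_lock(&dev_lock);
 *	node = drm_mm_search_free(mm, size, alignment, 0);
 *	if (node)
 *		node = drm_mm_get_block_atomic(node, size, alignment);
 *	spin_unlock(&dev_lock);
 */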

static int drm_mm_create_tail_node(struct drm_mm *mm,
				   unsigned long start,
				   unsigned long size, int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(mm, atomic);
	if (unlikely(child == NULL))
		return -ENOMEM;

	child->free = 1;
	child->size = size;
	child->start = start;
	child->mm = mm;

	list_add_tail(&child->node_list, &mm->node_list);
	list_add_tail(&child->free_stack, &mm->free_stack);

	return 0;
}

static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
						 unsigned long size,
						 int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(parent->mm, atomic);
	if (unlikely(child == NULL))
		return NULL;

	INIT_LIST_HEAD(&child->free_stack);

	child->size = size;
	child->start = parent->start;
	child->mm = parent->mm;

	list_add_tail(&child->node_list, &parent->node_list);

	parent->size -= size;
	parent->start += size;
	return child;
}


struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
					     unsigned long size,
					     unsigned alignment,
					     int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;

	if (alignment)
		tmp = node->start % alignment;

	if (tmp) {
		align_splitoff =
		    drm_mm_split_at_start(node, alignment - tmp, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->free_stack);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);
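
/*
 * Allocation is a two-step search-then-get sequence: pick a free block with
 * drm_mm_search_free() and then carve the allocation out of it. A sketch
 * (non-atomic path, best-fit search, error handling elided):
 *
 *	struct drm_mm_node *hole, *node;
 *
 *	hole = drm_mm_search_free(mm, size, alignment, 1);
 *	if (!hole)
 *		return -ENOSPC;
 *	node = drm_mm_get_block_generic(hole, size, alignment, 0);
 *
 * Any space split off in front of the block to satisfy the alignment is
 * immediately handed back to the free stack via drm_mm_put_block().
 */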

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;
	unsigned wasted = 0;

	if (node->start < start)
		wasted += start - node->start;
	if (alignment)
		tmp = ((node->start + wasted) % alignment);

	if (tmp)
		wasted += alignment - tmp;
	if (wasted) {
		align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->free_stack);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);
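
/*
 * The range-restricted variant pairs with drm_mm_search_free_in_range() in
 * the same way. A sketch for carving an allocation out of a sub-range such
 * as a mappable aperture (MAPPABLE_END is a made-up bound):
 *
 *	hole = drm_mm_search_free_in_range(mm, size, alignment,
 *					   0, MAPPABLE_END, 1);
 *	if (hole)
 *		node = drm_mm_get_block_range_generic(hole, size, alignment,
 *						      0, MAPPABLE_END, 0);
 */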

/*
 * Put a block. Merge with the previous and/or next block if they are free.
 * Otherwise add to the free stack.
 */

void drm_mm_put_block(struct drm_mm_node *cur)
{
	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->node_list;
	struct list_head *root_head = &mm->node_list;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;

	int merged = 0;

	BUG_ON(cur->scanned_block || cur->scanned_prev_free
				  || cur->scanned_next_free);

	if (cur_head->prev != root_head) {
		prev_node =
		    list_entry(cur_head->prev, struct drm_mm_node, node_list);
		if (prev_node->free) {
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	if (cur_head->next != root_head) {
		next_node =
		    list_entry(cur_head->next, struct drm_mm_node, node_list);
		if (next_node->free) {
			if (merged) {
				prev_node->size += next_node->size;
				list_del(&next_node->node_list);
				list_del(&next_node->free_stack);
				spin_lock(&mm->unused_lock);
				if (mm->num_unused < MM_UNUSED_TARGET) {
					list_add(&next_node->free_stack,
						 &mm->unused_nodes);
					++mm->num_unused;
				} else
					kfree(next_node);
				spin_unlock(&mm->unused_lock);
			} else {
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	if (!merged) {
		cur->free = 1;
		list_add(&cur->free_stack, &mm->free_stack);
	} else {
		list_del(&cur->node_list);
		spin_lock(&mm->unused_lock);
		if (mm->num_unused < MM_UNUSED_TARGET) {
			list_add(&cur->free_stack, &mm->unused_nodes);
			++mm->num_unused;
		} else
			kfree(cur);
		spin_unlock(&mm->unused_lock);
	}
}
EXPORT_SYMBOL(drm_mm_put_block);

static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	unsigned wasted = 0;

	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned tmp = start % alignment;
		if (tmp)
			wasted = alignment - tmp;
	}

	if (end >= start + size + wasted)
		return 1;

	return 0;
}

struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->free_stack, free_stack) {
		if (!check_free_hole(entry->start, entry->start + entry->size,
				     size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free);

struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->free_stack, free_stack) {
		unsigned long adj_start = entry->start < start ?
			start : entry->start;
		unsigned long adj_end = entry->start + entry->size > end ?
			end : entry->start + entry->size;

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scan routines with the parameters for the desired
 * hole. A complete usage sketch follows drm_mm_scan_remove_block() below.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
		      unsigned alignment)
{
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_size = 0;
	mm->scan_check_range = 0;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * Initialize LRU scanning for a restricted range.
 *
 * This simply sets up the scan routines with the parameters for the desired
 * hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
				 unsigned alignment,
				 unsigned long start,
				 unsigned long end)
{
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_size = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns non-zero if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct list_head *prev_free, *next_free;
	struct drm_mm_node *prev_node, *next_node;
	unsigned long adj_start;
	unsigned long adj_end;

	mm->scanned_blocks++;

	prev_free = next_free = NULL;

	BUG_ON(node->free);
	node->scanned_block = 1;
	node->free = 1;

	if (node->node_list.prev != &mm->node_list) {
		prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
				       node_list);

		if (prev_node->free) {
			list_del(&prev_node->node_list);

			node->start = prev_node->start;
			node->size += prev_node->size;

			prev_node->scanned_prev_free = 1;

			prev_free = &prev_node->free_stack;
		}
	}

	if (node->node_list.next != &mm->node_list) {
		next_node = list_entry(node->node_list.next, struct drm_mm_node,
				       node_list);

		if (next_node->free) {
			list_del(&next_node->node_list);

			node->size += next_node->size;

			next_node->scanned_next_free = 1;

			next_free = &next_node->free_stack;
		}
	}

	/* The free_stack list is not used for allocated objects, so these two
	 * pointers can be abused (as long as no allocations in this memory
	 * manager happen). */
	node->free_stack.prev = prev_free;
	node->free_stack.next = next_free;

	if (mm->scan_check_range) {
		adj_start = node->start < mm->scan_start ?
			mm->scan_start : node->start;
		adj_end = node->start + node->size > mm->scan_end ?
			mm->scan_end : node->start + node->size;
	} else {
		adj_start = node->start;
		adj_end = node->start + node->size;
	}

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = node->start;
		mm->scan_hit_size = node->size;

		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed in exactly the reverse order from the scan list as
 * they have been added, otherwise the internal state of the memory manager
 * will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then
 * return the just-freed block (because it's at the top of the free_stack
 * list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node, *next_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;
	node->free = 0;

	prev_node = list_entry(node->free_stack.prev, struct drm_mm_node,
			       free_stack);
	next_node = list_entry(node->free_stack.next, struct drm_mm_node,
			       free_stack);

	if (prev_node) {
		BUG_ON(!prev_node->scanned_prev_free);
		prev_node->scanned_prev_free = 0;

		list_add_tail(&prev_node->node_list, &node->node_list);

		node->start = prev_node->start + prev_node->size;
		node->size -= prev_node->size;
	}

	if (next_node) {
		BUG_ON(!next_node->scanned_next_free);
		next_node->scanned_next_free = 0;

		list_add(&next_node->node_list, &node->node_list);

		node->size -= next_node->size;
	}

	INIT_LIST_HEAD(&node->free_stack);

	/* Only need to check for containement because start&size for the
	 * complete resulting free block (not just the desired part) is
	 * stored. */
	if (node->start >= mm->scan_hit_start &&
	    node->start + node->size
	    		<= mm->scan_hit_start + mm->scan_hit_size) {
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
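
/*
 * Sketch of a complete eviction scan, loosely modelled on how a driver can
 * drive this API (struct my_obj, the lru/unwind/evict lists and the
 * obj->mm_node member are made-up driver-side stand-ins):
 *
 *	struct my_obj *obj, *tmp;
 *	LIST_HEAD(unwind);
 *	LIST_HEAD(evict);
 *	int found = 0;
 *
 *	drm_mm_init_scan(mm, size, alignment);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->unwind_link, &unwind);
 *		if (drm_mm_scan_add_block(obj->mm_node)) {
 *			found = 1;
 *			break;
 *		}
 *	}
 *
 *	Because each node was prepended to the unwind list, walking it
 *	head-first removes the blocks in reverse order of addition:
 *
 *	list_for_each_entry_safe(obj, tmp, &unwind, unwind_link) {
 *		if (drm_mm_scan_remove_block(obj->mm_node) && found)
 *			list_move(&obj->unwind_link, &evict);
 *		else
 *			list_del_init(&obj->unwind_link);
 *	}
 *
 *	Once the scan list is empty again, everything on the evict list can
 *	be unbound and its node released with drm_mm_put_block(), leaving a
 *	hole that satisfies the requested size and alignment.
 */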

int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->node_list);
	INIT_LIST_HEAD(&mm->free_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;
	spin_lock_init(&mm->unused_lock);

	return drm_mm_create_tail_node(mm, start, size, 0);
}
EXPORT_SYMBOL(drm_mm_init);

void drm_mm_takedown(struct drm_mm *mm)
{
	struct list_head *bnode = mm->free_stack.next;
	struct drm_mm_node *entry;
	struct drm_mm_node *next;

	entry = list_entry(bnode, struct drm_mm_node, free_stack);

	if (entry->node_list.next != &mm->node_list ||
	    entry->free_stack.next != &mm->free_stack) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	list_del(&entry->free_stack);
	list_del(&entry->node_list);
	kfree(entry);

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, free_stack) {
		list_del(&entry->free_stack);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	int total_used = 0, total_free = 0, total = 0;

	list_for_each_entry(entry, &mm->node_list, node_list) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n",
			prefix, entry->start, entry->start + entry->size,
			entry->size, entry->free ? "free" : "used");
		total += entry->size;
		if (entry->free)
			total_free += entry->size;
		else
			total_used += entry->size;
	}
	printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total,
		total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	int total_used = 0, total_free = 0, total = 0;

	list_for_each_entry(entry, &mm->node_list, node_list) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n",
			   entry->start, entry->start + entry->size,
			   entry->size, entry->free ? "free" : "used");
		total += entry->size;
		if (entry->free)
			total_free += entry->size;
		else
			total_used += entry->size;
	}
	seq_printf(m, "total: %d, used %d free %d\n", total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif