drm_mm.c 17.5 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is just an
 * unordered stack of free regions. This could easily be improved if an RB-tree
 * is used instead. At least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
41
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
42 43
 */

44 45
#include <drm/drmP.h>
#include <drm/drm_mm.h>
46
#include <linux/slab.h>
47
#include <linux/seq_file.h>
48
#include <linux/export.h>
49

D
David Herrmann 已提交
50 51 52 53 54 55 56 57 58 59 60 61
/*
 * Forward declarations: the insert paths below need the hole-search helpers,
 * which are defined later in this file.
 */
static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long color,
						enum drm_mm_search_flags flags);
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long color,
						unsigned long start,
						unsigned long end,
						enum drm_mm_search_flags flags);
62

63 64
/*
 * Carve an allocation of @size out of the hole following @hole_node and set
 * up @node to track it.  The caller (the search path) has already verified
 * the hole fits after color adjustment and alignment.
 */
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 unsigned long size, unsigned alignment,
				 unsigned long color)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	/* @node must be a cleared, not-yet-tracked node. */
	BUG_ON(node->allocated);

	/* Let the driver shrink the usable range for this color, if set. */
	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	/* Round the start up to the requested alignment. */
	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	/*
	 * No leading gap remains, so hole_node no longer has a hole
	 * following it; drop it from the hole stack.
	 */
	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	/* The search path must never hand us a hole that is too small. */
	BUG_ON(node->start + node->size > adj_end);

	/* Any space left after the new node becomes a fresh hole. */
	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

108
/*
 * Reserve a node at a pre-determined location: @node->start and @node->size
 * are already filled in by the caller.  Walks the hole stack for a hole that
 * fully covers [start, start + size) and links the node into it.
 *
 * Returns 0 on success, -ENOSPC (with a WARN) if no hole spans the range.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	struct drm_mm_node *hole;
	unsigned long end = node->start + node->size;
	unsigned long hole_start;
	unsigned long hole_end;

	BUG_ON(node == NULL);

	/* Find the relevant hole to add our node to */
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		/* The hole must contain the requested range completely. */
		if (hole_start > node->start || hole_end < end)
			continue;

		node->mm = mm;
		node->allocated = 1;

		INIT_LIST_HEAD(&node->hole_stack);
		list_add(&node->node_list, &hole->node_list);

		/* Node starts flush with the hole: no leading gap remains. */
		if (node->start == hole_start) {
			hole->hole_follows = 0;
			list_del_init(&hole->hole_stack);
		}

		/* Trailing gap (if any) becomes a new hole after @node. */
		node->hole_follows = 0;
		if (end != hole_end) {
			list_add(&node->hole_stack, &mm->hole_stack);
			node->hole_follows = 1;
		}

		return 0;
	}

	WARN(1, "no hole found for node 0x%lx + 0x%lx\n",
	     node->start, node->size);
	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
147

148 149 150 151 152
/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. The preallocated memory node
 * must be cleared.
 */
153 154
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       unsigned long size, unsigned alignment,
155 156
			       unsigned long color,
			       enum drm_mm_search_flags flags)
157 158 159
{
	struct drm_mm_node *hole_node;

160
	hole_node = drm_mm_search_free_generic(mm, size, alignment,
161
					       color, flags);
162 163 164
	if (!hole_node)
		return -ENOSPC;

165
	drm_mm_insert_helper(hole_node, node, size, alignment, color);
166 167
	return 0;
}
168 169
EXPORT_SYMBOL(drm_mm_insert_node_generic);

170 171 172
/*
 * Range-restricted variant of drm_mm_insert_helper(): carve @size out of
 * @hole_node's following hole, but keep the allocation inside [start, end).
 */
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       unsigned long size, unsigned alignment,
				       unsigned long color,
				       unsigned long start, unsigned long end)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	/* Caller must pass a real hole and a cleared node. */
	BUG_ON(!hole_node->hole_follows || node->allocated);

	/* Clamp the hole to the requested range first ... */
	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	/* ... then let the driver apply color restrictions ... */
	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	/* ... and finally round the start up to the alignment. */
	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	/* No leading gap remains: the hole after hole_node is consumed. */
	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	/* The search path guarantees both the hole and the range fit. */
	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	/* Any space left after the new node becomes a fresh hole. */
	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

222 223 224 225
/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. This is for range
 * restricted allocations. The preallocated memory node must be cleared.
226
 */
227 228
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					unsigned long size, unsigned alignment, unsigned long color,
229 230
					unsigned long start, unsigned long end,
					enum drm_mm_search_flags flags)
231
{
232 233
	struct drm_mm_node *hole_node;

234 235
	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
236
							start, end, flags);
237 238 239
	if (!hole_node)
		return -ENOSPC;

240 241
	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
242 243 244
				   start, end);
	return 0;
}
245 246
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);

247 248 249 250 251
/**
 * Remove a memory node from the allocator.
 *
 * The node's own hole (if any) is merged into the hole following its
 * predecessor.  Safe to call only on allocated nodes that are not part of
 * an active eviction scan.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	/* Removing a node that was never allocated is a caller bug. */
	if (WARN_ON(!node->allocated))
		return;

	/* Must not be on the eviction scan list. */
	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	/* hole_follows must agree with the actual hole extent. */
	if (node->hole_follows) {
		BUG_ON(__drm_mm_hole_node_start(node) ==
		       __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(__drm_mm_hole_node_start(node) !=
		       __drm_mm_hole_node_end(node));


	/*
	 * The freed space joins (or becomes) the hole after prev_node; move
	 * it to the top of the hole stack so the next search finds it first.
	 */
	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

284 285
/*
 * Does the hole [start, end) fit an allocation of @size bytes, once the
 * start is rounded up to @alignment (0 == no alignment)?  Returns 1 if it
 * fits, 0 otherwise.
 */
static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	unsigned long aligned_start = start;

	/* Quick reject: the raw hole is already too small. */
	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned rem = aligned_start % alignment;
		if (rem)
			aligned_start += alignment - rem;
	}

	/* Re-check with the alignment padding accounted for. */
	return end >= aligned_start + size;
}

D
David Herrmann 已提交
299 300 301 302 303
/*
 * Walk the hole stack for a hole that fits @size/@alignment/@color.  With
 * DRM_MM_SEARCH_BEST, scan all holes and pick the node with the smallest
 * size; otherwise return the first fit.  Returns NULL if nothing fits.
 */
static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						      unsigned long size,
						      unsigned alignment,
						      unsigned long color,
						      enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long adj_start;
	unsigned long adj_end;
	unsigned long best_size;

	/* Searching during an active eviction scan would corrupt state. */
	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
		/* Color adjustment may shrink the hole to nothing. */
		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		/* First fit unless the caller asked for best fit. */
		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
337

D
David Herrmann 已提交
338
/*
 * Like drm_mm_search_free_generic(), but each hole is first clamped to
 * [start, end) before checking whether the allocation fits.
 */
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							unsigned long size,
							unsigned alignment,
							unsigned long color,
							unsigned long start,
							unsigned long end,
							enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long adj_start;
	unsigned long adj_end;
	unsigned long best_size;

	/* Searching during an active eviction scan would corrupt state. */
	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
		/* Clamp the hole to the requested range. */
		if (adj_start < start)
			adj_start = start;
		if (adj_end > end)
			adj_end = end;

		/* Color adjustment may shrink the hole to nothing. */
		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		/* First fit unless the caller asked for best fit. */
		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}

384 385 386 387 388 389
/**
 * Moves an allocation. To be used with embedded struct drm_mm_node.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	/* Take over the old node's position on both lists. */
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);

	/* Copy the allocation's parameters verbatim. */
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;
	new->hole_follows = old->hole_follows;

	/* Ownership of the range moves from @old to @new. */
	new->allocated = 1;
	old->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_replace_node);

402 403 404 405 406 407 408 409 410
/**
 * Initialize lru scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      unsigned long size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	/* Unrestricted scan: the whole managed range is eligible. */
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

427 428 429 430 431 432 433 434 435
/**
 * Initialize lru scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 unsigned long size,
				 unsigned alignment,
				 unsigned long color,
				 unsigned long start,
				 unsigned long end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	/* Restrict the scan to holes overlapping [start, end). */
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

456 457 458 459 460 461 462 463 464
/**
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns non-zero, if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	unsigned long hole_start, hole_end;
	unsigned long adj_start, adj_end;

	mm->scanned_blocks++;

	/* A node may only be on the scan list once. */
	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	/*
	 * Temporarily unlink the node so its space merges into prev_node's
	 * hole, but keep its prev/next pointers pointing at its neighbour and
	 * the previously scanned node, so scan_remove_block can undo this in
	 * exact reverse order.
	 */
	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	/* Clamp to the scan range, if this is a range-restricted scan. */
	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	/* Record the unclamped hole extents on a hit. */
	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 * return the just freed block (because it's at the top of the hole_stack
 * list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	/*
	 * scan_add_block left node_list.prev pointing at the real
	 * predecessor, so we can splice the node back in.
	 */
	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	/* Evict only if the node overlaps the hole found during the scan. */
	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

D
Dave Airlie 已提交
544
int drm_mm_clean(struct drm_mm * mm)
545
{
546
	struct list_head *head = &mm->head_node.node_list;
547

548 549
	return (head->next->next == head);
}
550
EXPORT_SYMBOL(drm_mm_clean);
551

552
/*
 * Initialize the allocator to manage the range [start, start + size).
 */
void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	mm->scanned_blocks = 0;

	/* Clever trick to avoid a special case in the free hole tracking. */
	/*
	 * head_node is a sentinel placed at start + size with a size that
	 * wraps back around (start - head_node.start == -size), so the
	 * "hole" following it covers the entire managed range and the
	 * normal hole logic needs no empty-manager special case.
	 */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	/* No per-color restrictions until a driver installs a hook. */
	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);
572

D
Dave Airlie 已提交
573
/*
 * Tear down the allocator.  Nothing is freed here; we only complain if the
 * caller left nodes allocated.
 */
void drm_mm_takedown(struct drm_mm * mm)
{
	WARN(!list_empty(&mm->head_node.node_list),
	     "Memory manager not clean during takedown.\n");
}
EXPORT_SYMBOL(drm_mm_takedown);
579

D
Daniel Vetter 已提交
580 581
/*
 * Print the hole following @entry (if any) to the kernel log and return its
 * size, or 0 when no hole follows.
 */
static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
				       const char *prefix)
{
	unsigned long hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
			prefix, hole_start, hole_end,
			hole_size);
		return hole_size;
	}

	return 0;
}

/*
 * Dump every node and hole of @mm to the kernel log, followed by a summary
 * of total/used/free space.  Each line is prefixed with @prefix.
 */
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	/* The hole after the head sentinel covers any leading free space. */
	total_free += drm_mm_debug_hole(&mm->head_node, prefix);

	drm_mm_for_each_node(entry, mm) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
			prefix, entry->start, entry->start + entry->size,
			entry->size);
		total_used += entry->size;
		total_free += drm_mm_debug_hole(entry, prefix);
	}
	total = total_free + total_used;

	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
		total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

619
#if defined(CONFIG_DEBUG_FS)
D
Daniel Vetter 已提交
620
/*
 * seq_file twin of drm_mm_debug_hole(): print the hole following @entry (if
 * any) into @m and return its size, or 0 when no hole follows.
 */
static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
	unsigned long hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
				hole_start, hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/*
 * Dump every node and hole of @mm into the debugfs seq_file @m, followed by
 * a summary of total/used/free space.  Always returns 0.
 */
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	/* The hole after the head sentinel covers any leading free space. */
	total_free += drm_mm_dump_hole(m, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
				entry->start, entry->start + entry->size,
				entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(m, entry);
	}
	total = total_free + total_used;

	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif