/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is just an
 * unordered stack of free regions. This could easily be improved if an RB-tree
 * were used instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

#define MM_UNUSED_TARGET 4

static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kzalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kzalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, node_list);
			list_del(&child->node_list);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

/* drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * mm:	memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->node_list, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
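
/*
 * Illustrative usage sketch: drm_mm_pre_get() is intended to be called from a
 * context that may sleep, so that a later allocation made under a spinlock can
 * be served from mm->unused_nodes via the atomic path of drm_mm_kmalloc().
 * The dev_priv structure and its lock below are hypothetical examples.
 *
 *	if (drm_mm_pre_get(&dev_priv->mm))
 *		return -ENOMEM;
 *
 *	spin_lock(&dev_priv->mm_lock);
 *	hole = drm_mm_search_free_generic(&dev_priv->mm, size, align, 0, false);
 *	if (hole)
 *		node = drm_mm_get_block_generic(hole, size, align, 0, 1);
 *	spin_unlock(&dev_priv->mm_lock);
 */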

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 unsigned long size, unsigned alignment,
				 unsigned long color)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	struct drm_mm_node *hole;
	unsigned long end = node->start + node->size;
	unsigned long hole_start;
	unsigned long hole_end;

	BUG_ON(node == NULL);

	/* Find the relevant hole to add our node to */
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		if (hole_start > node->start || hole_end < end)
			continue;

		node->mm = mm;
		node->allocated = 1;

		INIT_LIST_HEAD(&node->hole_stack);
		list_add(&node->node_list, &hole->node_list);

		if (node->start == hole_start) {
			hole->hole_follows = 0;
			list_del_init(&hole->hole_stack);
		}

		node->hole_follows = 0;
		if (end != hole_end) {
			list_add(&node->hole_stack, &mm->hole_stack);
			node->hole_follows = 1;
		}

		return 0;
	}

	WARN(1, "no hole found for node 0x%lx + 0x%lx\n",
	     node->start, node->size);
	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_reserve_node);

struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
					     unsigned long size,
					     unsigned alignment,
					     unsigned long color,
					     int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper(hole_node, node, size, alignment, color);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. The preallocated memory node
 * must be cleared.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       unsigned long size, unsigned alignment,
			       unsigned long color)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);

int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
		       unsigned long size, unsigned alignment)
{
	return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
}
EXPORT_SYMBOL(drm_mm_insert_node);
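
/*
 * Illustrative usage sketch for the preallocated-node API above; the kzalloc'ed
 * node here stands in for a node embedded in a driver-private object.
 *
 *	struct drm_mm_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
 *	if (!node)
 *		return -ENOMEM;
 *
 *	ret = drm_mm_insert_node(&mm, node, size, alignment);
 *	if (ret) {
 *		kfree(node);
 *		return ret;		// typically -ENOSPC
 *	}
 *
 *	// ... use [node->start, node->start + node->size) ...
 *
 *	drm_mm_remove_node(node);
 *	kfree(node);
 */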

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       unsigned long size, unsigned alignment,
				       unsigned long color,
				       unsigned long start, unsigned long end)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
						unsigned long size,
						unsigned alignment,
						unsigned long color,
						unsigned long start,
						unsigned long end,
						int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
				   start, end);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. This is for range
 * restricted allocations. The preallocated memory node must be cleared.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					unsigned long size, unsigned alignment, unsigned long color,
					unsigned long start, unsigned long end)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);

int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
				unsigned long size, unsigned alignment,
				unsigned long start, unsigned long end)
{
	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
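
/*
 * Illustrative usage sketch for range-restricted insertion, e.g. keeping an
 * allocation inside the first "mappable" bytes of an aperture; the mappable
 * bound is a hypothetical example value.
 *
 *	ret = drm_mm_insert_node_in_range(&mm, node, size, alignment,
 *					  0, mappable);
 *	if (ret)
 *		return ret;	// -ENOSPC if no hole fits within [0, mappable)
 */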

/**
 * Remove a memory node from the allocator.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(__drm_mm_hole_node_start(node) ==
		       __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(__drm_mm_hole_node_start(node) !=
		       __drm_mm_hole_node_end(node));


	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

/*
 * Remove a memory node from the allocator and free the allocated struct
 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
 * drm_mm_get_block functions.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{

	struct drm_mm *mm = node->mm;

	drm_mm_remove_node(node);

	spin_lock(&mm->unused_lock);
	if (mm->num_unused < MM_UNUSED_TARGET) {
		list_add(&node->node_list, &mm->unused_nodes);
		++mm->num_unused;
	} else
		kfree(node);
	spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);
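
/*
 * Illustrative usage sketch for the older get_block/put_block interface, where
 * the drm_mm_node is owned by the allocator rather than preallocated by the
 * caller. The caller is assumed to hold whatever lock protects the manager.
 *
 *	hole = drm_mm_search_free_generic(&mm, size, alignment, 0, false);
 *	if (!hole)
 *		return -ENOSPC;
 *	node = drm_mm_get_block_generic(hole, size, alignment, 0, 0);
 *	if (!node)
 *		return -ENOMEM;
 *	// ... use the block ...
 *	drm_mm_put_block(node);		// removes the node and recycles/frees it
 */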

static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned tmp = start % alignment;
		if (tmp)
			start += alignment - tmp;
	}

	return end >= start + size;
}

struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
					       unsigned long size,
					       unsigned alignment,
					       unsigned long color,
					       bool best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long adj_start;
	unsigned long adj_end;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_generic);

struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							unsigned long size,
							unsigned alignment,
							unsigned long color,
							unsigned long start,
							unsigned long end,
							bool best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long adj_start;
	unsigned long adj_end;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
		if (adj_start < start)
			adj_start = start;
		if (adj_end > end)
			adj_end = end;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);

/**
 * Moves an allocation. To be used with embedded struct drm_mm_node.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      unsigned long size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 unsigned long size,
				 unsigned alignment,
				 unsigned long color,
				 unsigned long start,
				 unsigned long end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns non-zero if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	unsigned long hole_start, hole_end;
	unsigned long adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed from the scan list in exactly the reverse order they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then return
 * the just-freed block (because it's at the top of the hole_stack list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
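
/*
 * Illustrative eviction-scan sketch tying the four scan helpers together. The
 * object type, its list members and the evict() helper are hypothetical; only
 * the drm_mm_* calls come from this file. list_add() puts the newest entry at
 * the head of scan_list, so walking it from the head removes blocks in the
 * reverse order they were added, as required.
 *
 *	drm_mm_init_scan(&mm, size, alignment, color);
 *
 *	list_for_each_entry(obj, &lru_list, lru_link) {
 *		list_add(&obj->scan_link, &scan_list);
 *		if (drm_mm_scan_add_block(&obj->node))
 *			break;	// a large enough hole can be assembled
 *	}
 *
 *	list_for_each_entry_safe(obj, next, &scan_list, scan_link) {
 *		if (drm_mm_scan_remove_block(&obj->node))
 *			list_move(&obj->scan_link, &evict_list);
 *		else
 *			list_del(&obj->scan_link);
 *	}
 *
 *	// Only once the scan list is empty may the selected nodes be freed.
 *	list_for_each_entry_safe(obj, next, &evict_list, scan_link) {
 *		list_del(&obj->scan_link);
 *		evict(obj);	// frees obj->node, creating the desired hole
 *	}
 */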

int drm_mm_clean(struct drm_mm * mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;
	spin_lock_init(&mm->unused_lock);

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);

void drm_mm_takedown(struct drm_mm * mm)
{
	struct drm_mm_node *entry, *next;

	if (WARN(!list_empty(&mm->head_node.node_list),
		 "Memory manager not clean. Delaying takedown\n")) {
		return;
	}

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
		list_del(&entry->node_list);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
				       const char *prefix)
{
	unsigned long hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
			prefix, hole_start, hole_end,
			hole_size);
		return hole_size;
	}

	return 0;
}

void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_debug_hole(&mm->head_node, prefix);

	drm_mm_for_each_node(entry, mm) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
			prefix, entry->start, entry->start + entry->size,
			entry->size);
		total_used += entry->size;
		total_free += drm_mm_debug_hole(entry, prefix);
	}
	total = total_free + total_used;

	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
		total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
	unsigned long hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
				hole_start, hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(m, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
				entry->start, entry->start + entry->size,
				entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(m, entry);
	}
	total = total_free + total_used;

	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif