/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions, which could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

#define MM_UNUSED_TARGET 4

static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kzalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kzalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, node_list);
			list_del(&child->node_list);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

/**
 * drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * @mm: memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->node_list, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
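
/*
 * Usage sketch (illustrative only, not part of the original file): a caller
 * that has to carve out space while holding a spinlock can top up the cache
 * of unused nodes beforehand, so the GFP_ATOMIC path in drm_mm_kmalloc() can
 * fall back on the unused_nodes list.  drm_mm_pre_get() may sleep and must
 * therefore be called before the lock is taken; the trailing 1 selects the
 * atomic allocation path.  The lock name and the size are hypothetical.
 *
 *	drm_mm_pre_get(mm);
 *
 *	spin_lock(&dev_priv->mm_lock);
 *	hole = drm_mm_search_free_generic(mm, 4096, 0, 0, false);
 *	node = hole ? drm_mm_get_block_generic(hole, 4096, 0, 0, 1) : NULL;
 *	spin_unlock(&dev_priv->mm_lock);
 */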

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 unsigned long size, unsigned alignment,
				 unsigned long color)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
					unsigned long start,
					unsigned long size,
					bool atomic)
{
	struct drm_mm_node *hole, *node;
	unsigned long end = start + size;
	unsigned long hole_start;
	unsigned long hole_end;

	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		if (hole_start > start || hole_end < end)
			continue;

		node = drm_mm_kmalloc(mm, atomic);
		if (unlikely(node == NULL))
			return NULL;

		node->start = start;
		node->size = size;
		node->mm = mm;
		node->allocated = 1;

		INIT_LIST_HEAD(&node->hole_stack);
		list_add(&node->node_list, &hole->node_list);

		if (start == hole_start) {
			hole->hole_follows = 0;
			list_del_init(&hole->hole_stack);
		}

		node->hole_follows = 0;
		if (end != hole_end) {
			list_add(&node->hole_stack, &mm->hole_stack);
			node->hole_follows = 1;
		}

		return node;
	}

	WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
	return NULL;
}
EXPORT_SYMBOL(drm_mm_create_block);

struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
					     unsigned long size,
					     unsigned alignment,
					     unsigned long color,
					     int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper(hole_node, node, size, alignment, color);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. The preallocated memory node
 * must be cleared.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       unsigned long size, unsigned alignment,
			       unsigned long color)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);

int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
		       unsigned long size, unsigned alignment)
{
	return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
}
EXPORT_SYMBOL(drm_mm_insert_node);
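
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * insert_node interface is meant for callers that embed the struct
 * drm_mm_node in their own object rather than having the manager allocate
 * it.  The structure and helper below are hypothetical; the node really
 * must be zeroed before the first insertion.
 *
 *	struct my_buffer {
 *		struct drm_mm_node vram_node;
 *	};
 *
 *	static int my_buffer_bind(struct drm_mm *mm, struct my_buffer *buf,
 *				  unsigned long size)
 *	{
 *		memset(&buf->vram_node, 0, sizeof(buf->vram_node));
 *		return drm_mm_insert_node(mm, &buf->vram_node, size, PAGE_SIZE);
 *	}
 *
 * A return value of -ENOSPC means no hole was large enough; the allocation
 * is undone later with drm_mm_remove_node(&buf->vram_node), which does not
 * free the embedded node itself.
 */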

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       unsigned long size, unsigned alignment,
				       unsigned long color,
				       unsigned long start, unsigned long end)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
						unsigned long size,
						unsigned alignment,
						unsigned long color,
						unsigned long start,
						unsigned long end,
						int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
				   start, end);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. This is for range-restricted
 * allocations. The preallocated memory node must be cleared.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					unsigned long size, unsigned alignment, unsigned long color,
					unsigned long start, unsigned long end)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);

int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
				unsigned long size, unsigned alignment,
				unsigned long start, unsigned long end)
{
	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
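
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * range-restricted variant keeps the allocation inside a window of the
 * managed area, e.g. a CPU-mappable aperture at the start of VRAM.  The
 * buffer object and mappable_end below are hypothetical.
 *
 *	ret = drm_mm_insert_node_in_range(mm, &buf->vram_node, size, 0,
 *					  0, mappable_end);
 *
 * A zero alignment means no alignment constraint; -ENOSPC again means no
 * suitable hole exists inside [0, mappable_end).
 */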

/**
 * Remove a memory node from the allocator.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(__drm_mm_hole_node_start(node) ==
		       __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(__drm_mm_hole_node_start(node) !=
		       __drm_mm_hole_node_end(node));


	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

/*
 * Remove a memory node from the allocator and free the allocated struct
 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
 * drm_mm_get_block functions.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{

	struct drm_mm *mm = node->mm;

	drm_mm_remove_node(node);

	spin_lock(&mm->unused_lock);
	if (mm->num_unused < MM_UNUSED_TARGET) {
		list_add(&node->node_list, &mm->unused_nodes);
		++mm->num_unused;
	} else
		kfree(node);
	spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);
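
/*
 * Usage sketch (illustrative only, not part of the original file): with the
 * get_block/put_block interface the manager allocates the struct drm_mm_node
 * itself, in contrast to the embedded-node insert_node interface above.
 *
 *	hole = drm_mm_search_free_generic(mm, size, alignment, 0, false);
 *	if (!hole)
 *		return -ENOSPC;
 *	node = drm_mm_get_block_generic(hole, size, alignment, 0, 0);
 *	if (!node)
 *		return -ENOMEM;
 *
 * and later, when the space is no longer needed:
 *
 *	drm_mm_put_block(node);
 */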

static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned tmp = start % alignment;
		if (tmp)
			start += alignment - tmp;
	}

	return end >= start + size;
}

struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
					       unsigned long size,
					       unsigned alignment,
					       unsigned long color,
					       bool best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long adj_start;
	unsigned long adj_end;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_generic);

struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							unsigned long size,
							unsigned alignment,
							unsigned long color,
							unsigned long start,
							unsigned long end,
							bool best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long adj_start;
	unsigned long adj_end;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
		if (adj_start < start)
			adj_start = start;
		if (adj_end > end)
			adj_end = end;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);

/**
 * Moves an allocation. To be used with embedded struct drm_mm_node.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      unsigned long size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 unsigned long size,
				 unsigned alignment,
				 unsigned long color,
				 unsigned long start,
				 unsigned long end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns non-zero if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	unsigned long hole_start, hole_end;
	unsigned long adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then return
 * the just-freed block (because it's at the top of the hole_stack list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		 node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
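
/*
 * Usage sketch (illustrative only, not part of the original file): an
 * eviction pass built on the scan API.  Candidates are added in LRU order
 * until a suitable hole is found, then every added block is removed again
 * in the exact reverse order (pushing candidates onto a temporary list with
 * list_add() makes a forward walk of that list the reverse of the insertion
 * order).  Blocks for which drm_mm_scan_remove_block() returns 1 overlap
 * the hole that was found and must be evicted.  The my_obj structure, the
 * lru list and the link members are hypothetical.
 *
 *	LIST_HEAD(unwind);
 *	LIST_HEAD(evict);
 *
 *	drm_mm_init_scan(mm, size, alignment, 0);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->scan_link, &unwind);
 *		if (drm_mm_scan_add_block(&obj->node))
 *			break;
 *	}
 *
 *	list_for_each_entry_safe(obj, next, &unwind, scan_link) {
 *		list_del_init(&obj->scan_link);
 *		if (drm_mm_scan_remove_block(&obj->node))
 *			list_add(&obj->evict_link, &evict);
 *	}
 *
 * Only after the scan list is completely unwound may the nodes on the evict
 * list be removed with drm_mm_remove_node(); the hole that this opens up can
 * then be used for the new allocation.
 */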

int drm_mm_clean(struct drm_mm * mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;
	spin_lock_init(&mm->unused_lock);

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->color_adjust = NULL;

	return 0;
}
EXPORT_SYMBOL(drm_mm_init);

void drm_mm_takedown(struct drm_mm * mm)
{
	struct drm_mm_node *entry, *next;

	if (!list_empty(&mm->head_node.node_list)) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
		list_del(&entry->node_list);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;
	unsigned long hole_start, hole_end, hole_size;

	hole_start = drm_mm_hole_node_start(&mm->head_node);
	hole_end = drm_mm_hole_node_end(&mm->head_node);
	hole_size = hole_end - hole_start;
	if (hole_size)
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
			prefix, hole_start, hole_end,
			hole_size);
	total_free += hole_size;

	drm_mm_for_each_node(entry, mm) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
			prefix, entry->start, entry->start + entry->size,
			entry->size);
		total_used += entry->size;

		if (entry->hole_follows) {
			hole_start = drm_mm_hole_node_start(entry);
			hole_end = drm_mm_hole_node_end(entry);
			hole_size = hole_end - hole_start;
			printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
				prefix, hole_start, hole_end,
				hole_size);
			total_free += hole_size;
		}
	}
	total = total_free + total_used;

	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
		total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
	unsigned long hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
				hole_start, hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(m, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
				entry->start, entry->start + entry->size,
				entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(m, entry);
	}
	total = total_free + total_used;

	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif