/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an rb-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include <linux/interval_tree_generic.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. The drivers are free to use the
 * resource allocator from the linux core if it suits them, the upside of
 * drm_mm is that it's in the DRM core, which means that it's easier to extend
 * for some of the crazier special purpose needs of gpus.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * datastructures. drm_mm itself will not do any allocations of its own, so if
 * drivers choose not to embed nodes they need to still allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This
 * is useful for taking over initial mode setting configurations from the
 * firmware, where an object needs to be created which exactly matches the
 * firmware's scanout target. As long as the range is still free it can be
 * inserted anytime after the allocator is initialized, which helps with
 * avoiding looped dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since gfx thrashing is a
 * fairly steep cliff this is not a real concern. Removing a node again is
 * O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be
 * used to implement sophisticated placement restrictions. The i915 DRM driver
 * uses this to implement guard pages between incompatible caching domains in
 * the graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided as
 * are some basic allocator dumpers for debugging. A short usage sketch
 * follows this comment.
 */
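
/*
 * Minimal usage sketch (editor's illustration, not part of the original
 * code): a hypothetical driver struct embeds both the allocator and one node,
 * carves out a 4 KiB block and tears everything down again. The struct and
 * function names below are made up; the drm_mm_* calls match the declarations
 * in this file, and DRM_MM_SEARCH_DEFAULT/DRM_MM_CREATE_DEFAULT are assumed
 * to come from <drm/drm_mm.h>. Both @mm and the node are assumed to have been
 * zero-initialized (e.g. the containing struct was kzalloc'd).
 *
 *	struct my_manager {
 *		struct drm_mm mm;
 *		struct drm_mm_node node;
 *	};
 *
 *	static int my_manager_demo(struct my_manager *mgr)
 *	{
 *		int ret;
 *
 *		drm_mm_init(&mgr->mm, 0, 16 << 20);	// manage [0, 16 MiB)
 *
 *		ret = drm_mm_insert_node_generic(&mgr->mm, &mgr->node,
 *						 4096, 0, 0,
 *						 DRM_MM_SEARCH_DEFAULT,
 *						 DRM_MM_CREATE_DEFAULT);
 *		if (ret)
 *			return ret;
 *
 *		drm_mm_remove_node(&mgr->node);	// node may be reused afterwards
 *		drm_mm_takedown(&mgr->mm);	// only legal on a clean allocator
 *		return 0;
 *	}
 */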

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						u64 size,
						unsigned alignment,
						unsigned long color,
						enum drm_mm_search_flags flags);
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
						u64 size,
						unsigned alignment,
						unsigned long color,
						u64 start,
						u64 end,
						enum drm_mm_search_flags flags);

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)

struct drm_mm_node *
drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_first(&mm->interval_tree,
					       start, last);
}
EXPORT_SYMBOL(drm_mm_interval_first);

struct drm_mm_node *
drm_mm_interval_next(struct drm_mm_node *node, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_next(node, start, last);
}
EXPORT_SYMBOL(drm_mm_interval_next);

static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;

	node->__subtree_last = LAST(node);

	if (hole_node->allocated) {
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_node;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start)
			link = &parent->rb.rb_left;
		else
			link = &parent->rb.rb_right;
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented(&node->rb,
			    &mm->interval_tree,
			    &drm_mm_interval_tree_augment);
}

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 u64 size, unsigned alignment,
				 unsigned long color,
				 enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	BUG_ON(adj_start < hole_start);
	BUG_ON(adj_end > hole_end);

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	list_add(&node->node_list, &hole_node->node_list);

	drm_mm_interval_tree_add_node(hole_node, node);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. This is useful
 * to initialize the allocator with preallocated objects which must be set-up
 * before the range allocator can be set-up, e.g. when taking over a firmware
 * framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	u64 end = node->start + node->size;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;

	/* Find the relevant hole to add our node to */
	hole = drm_mm_interval_tree_iter_first(&mm->interval_tree,
					       node->start, ~(u64)0);
	if (hole) {
		if (hole->start < end)
			return -ENOSPC;
	} else {
		hole = list_entry(&mm->head_node.node_list,
				  typeof(*hole), node_list);
	}

	hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
	if (!hole->hole_follows)
		return -ENOSPC;

	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);
	if (hole_start > node->start || hole_end < end)
		return -ENOSPC;

	node->mm = mm;
	node->allocated = 1;

	list_add(&node->node_list, &hole->node_list);

	drm_mm_interval_tree_add_node(hole, node);

	if (node->start == hole_start) {
		hole->hole_follows = 0;
		list_del(&hole->hole_stack);
	}

	node->hole_follows = 0;
	if (end != hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
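
/*
 * Illustrative sketch (editor's addition, not original code): reserving the
 * range a firmware framebuffer already occupies, so that later allocations
 * cannot land on top of it. The wrapper name is hypothetical; @node is
 * assumed to be zero-initialized, and start, size and color must be filled in
 * before calling drm_mm_reserve_node().
 *
 *	static int my_reserve_fw_fb(struct drm_mm *mm, struct drm_mm_node *node,
 *				    u64 fb_base, u64 fb_size)
 *	{
 *		node->start = fb_base;
 *		node->size = fb_size;
 *		node->color = 0;
 *		return drm_mm_reserve_node(mm, node); // -ENOSPC if range is taken
 *	}
 */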

/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       u64 size, unsigned alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);
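
/*
 * Illustrative sketch (editor's addition): a top-down, page-aligned
 * allocation. DRM_MM_SEARCH_BELOW searches the hole stack from the other end
 * and DRM_MM_CREATE_TOP places the node at the top of the chosen hole; both
 * flags are used elsewhere in this file, the wrapper itself is hypothetical.
 *
 *	static int my_alloc_topdown(struct drm_mm *mm, struct drm_mm_node *node,
 *				    u64 size)
 *	{
 *		return drm_mm_insert_node_generic(mm, node, size, 4096, 0,
 *						  DRM_MM_SEARCH_BELOW,
 *						  DRM_MM_CREATE_TOP);
 *	}
 */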

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       u64 size, unsigned alignment,
				       unsigned long color,
				       u64 start, u64 end,
				       enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	list_add(&node->node_list, &hole_node->node_list);

	drm_mm_interval_tree_add_node(hole_node, node);

	BUG_ON(node->start < start);
	BUG_ON(node->start < adj_start);
	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					u64 size, unsigned alignment,
					unsigned long color,
					u64 start, u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
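
/*
 * Illustrative sketch (editor's addition): constraining an allocation to the
 * low 4 GiB of the managed range, e.g. for hardware that can only address
 * 32 bits. The wrapper name and the limit are hypothetical, and the default
 * search/create flags are assumed to come from <drm/drm_mm.h>.
 *
 *	static int my_alloc_low(struct drm_mm *mm, struct drm_mm_node *node,
 *				u64 size)
 *	{
 *		return drm_mm_insert_node_in_range_generic(mm, node, size, 0, 0,
 *							   0, 1ULL << 32,
 *							   DRM_MM_SEARCH_DEFAULT,
 *							   DRM_MM_CREATE_DEFAULT);
 *	}
 */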

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need
 * to be cleared again before it can be re-inserted into this or any other
 * drm_mm allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	if (WARN_ON(!node->allocated))
		return;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(__drm_mm_hole_node_start(node) ==
		       __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(__drm_mm_hole_node_start(node) !=
		       __drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	drm_mm_interval_tree_remove(node, &mm->interval_tree);
	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		u64 tmp = start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem)
			start += alignment - rem;
	}

	return end >= start + size;
}

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						      u64 size,
						      unsigned alignment,
						      unsigned long color,
						      enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							u64 size,
							unsigned alignment,
							unsigned long color,
							u64 start,
							u64 end,
							enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (adj_start < start)
			adj_start = start;
		if (adj_end > end)
			adj_end = end;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful for when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of remove
 * and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;
	new->__subtree_last = old->__subtree_last;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);
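
/*
 * Illustrative sketch (editor's addition): because drivers typically embed
 * struct drm_mm_node in their buffer objects, handing an allocation over to a
 * new object is done with drm_mm_replace_node() rather than by copying
 * pointers. The object type below is hypothetical.
 *
 *	struct my_obj { struct drm_mm_node vma_node; };
 *
 *	static void my_obj_transfer(struct my_obj *old, struct my_obj *new)
 *	{
 *		drm_mm_replace_node(&old->vma_node, &new->vma_node);
 *		// new->vma_node now owns the range, old->vma_node is unallocated
 *	}
 */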

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not most
 * efficient to simply start selecting objects from the tail of an LRU until
 * there's a suitable hole: Especially for big objects or nodes that otherwise
 * have special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
 * objects to the roster (probably by walking an LRU list, but this can be
 * freely implemented) until a suitable hole is found or there's no further
 * evictable object.
 *
 * Then the driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected in the scan. Adding and
 * removing an object is O(1), and since freeing a node is also O(1) the
 * overall complexity is O(scanned_objects). So like the free stack which
 * needs to be walked before a scan operation even begins this is linear in
 * the number of objects. It doesn't seem to hurt badly. A sketch of such an
 * eviction loop follows this comment.
 */
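
/*
 * Eviction loop sketch (editor's addition, not original code): the structure
 * mirrors the description above. "my_obj", its lru_link/scan_link list heads
 * and my_obj_evict() (which is expected to call drm_mm_remove_node() on
 * obj->node) are hypothetical driver constructs; only the drm_mm_* calls are
 * from this file. The scan list is built with list_add(), so walking it
 * forward removes the blocks in reverse order of addition, as required.
 *
 *	static int my_evict_for(struct drm_mm *mm, struct list_head *lru,
 *				u64 size, unsigned alignment)
 *	{
 *		struct my_obj *obj, *tmp;
 *		LIST_HEAD(scan_list);
 *		LIST_HEAD(evict_list);
 *		bool found = false;
 *
 *		drm_mm_init_scan(mm, size, alignment, 0);
 *
 *		list_for_each_entry(obj, lru, lru_link) {
 *			list_add(&obj->scan_link, &scan_list);
 *			if (drm_mm_scan_add_block(&obj->node)) {
 *				found = true;
 *				break;
 *			}
 *		}
 *
 *		// restore the allocator state before touching anything else
 *		list_for_each_entry_safe(obj, tmp, &scan_list, scan_link) {
 *			if (drm_mm_scan_remove_block(&obj->node))
 *				list_move(&obj->scan_link, &evict_list);
 *			else
 *				list_del_init(&obj->scan_link);
 *		}
 *
 *		// only now is it safe to actually evict the selected objects
 *		list_for_each_entry_safe(obj, tmp, &evict_list, scan_link)
 *			my_obj_evict(obj);
 *
 *		return found ? 0 : -ENOSPC;
 *	}
 */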

/**
 * drm_mm_init_scan - initialize lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      u64 size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 u64 size,
				 unsigned alignment,
				 unsigned long color,
				 u64 start,
				 u64 end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return true;
	}

	return false;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @node: drm_mm_node to remove
 *
 * Nodes _must_ be removed in exactly the reverse order from the scan list as
 * they have been added (e.g. using list_add() as they are added and then
 * list_for_each() over that eviction list to remove), otherwise the internal
 * state of the memory manager will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 * return the just freed block (because it's at the top of the hole_stack
 * list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
bool drm_mm_clean(struct drm_mm * mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm * mm, u64 start, u64 size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	mm->scanned_blocks = 0;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->interval_tree = RB_ROOT;

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm * mm)
{
	WARN(!list_empty(&mm->head_node.node_list),
	     "Memory manager not clean during takedown.\n");
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
				     const char *prefix)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
			 hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_debug_table - dump allocator state to dmesg
 * @mm: drm_mm allocator to dump
 * @prefix: prefix to use for dumping to dmesg
 */
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_debug_hole(&mm->head_node, prefix);

	drm_mm_for_each_node(entry, mm) {
		pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
			 entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_debug_hole(entry, prefix);
	}
	total = total_free + total_used;

	pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
		 total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		seq_printf(m, "%#018llx-%#018llx: %llu: free\n", hole_start,
			   hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_dump_table - dump allocator state to a seq_file
 * @m: seq_file to dump to
 * @mm: drm_mm allocator to dump
 */
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(m, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(m, entry);
	}
	total = total_free + total_used;

	seq_printf(m, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif