/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is just an
 * unordered stack of free regions. This could easily be improved by using an
 * RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include <linux/interval_tree_generic.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. Drivers are free to use the
 * resource allocator from the linux core if it suits them, but the upside of
 * drm_mm is that it's in the DRM core, which makes it easier to extend for
 * some of the crazier special purpose needs of gpus.
 *
 * The main data struct is &drm_mm; allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * datastructures. drm_mm itself will not do any allocations of its own, so if
 * drivers choose not to embed nodes they still need to allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This is
 * useful for taking over initial mode setting configurations from the firmware,
 * where an object needs to be created which exactly matches the firmware's
 * scanout target. As long as the range is still free it can be inserted anytime
 * after the allocator is initialized, which helps with avoiding looped
 * dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * anything better would be fairly complex, and since gfx thrashing is a fairly
 * steep cliff anyway this is not a real concern. Removing a node again is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be used
 * to implement sophisticated placement restrictions. The i915 DRM driver uses
 * this to implement guard pages between incompatible caching domains in the
 * graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and top-down.
 * The default is bottom-up. Top-down allocation can be used if the memory area
 * has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided as are
 * some basic allocator dumpers for debugging.
 */
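
/*
 * Illustrative usage sketch (not used by this file; struct my_buffer,
 * my_buffer_pin() and my_buffer_unpin() are made-up names). A driver
 * typically embeds a struct drm_mm_node in its buffer object, zero-allocates
 * the object (e.g. with kzalloc(), since the node must be cleared to 0 before
 * insertion) and then pins and unpins it through the allocator:
 *
 *	struct my_buffer {
 *		struct drm_mm_node node;
 *	};
 *
 *	static int my_buffer_pin(struct drm_mm *mm, struct my_buffer *buf,
 *				 u64 size)
 *	{
 *		return drm_mm_insert_node_generic(mm, &buf->node, size,
 *						  0, 0,
 *						  DRM_MM_SEARCH_DEFAULT,
 *						  DRM_MM_CREATE_DEFAULT);
 *	}
 *
 *	static void my_buffer_unpin(struct my_buffer *buf)
 *	{
 *		drm_mm_remove_node(&buf->node);
 *	}
 */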

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						u64 size,
						unsigned alignment,
						unsigned long color,
						enum drm_mm_search_flags flags);
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
						u64 size,
						unsigned alignment,
						unsigned long color,
						u64 start,
						u64 end,
						enum drm_mm_search_flags flags);

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)

struct drm_mm_node *
drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_first(&mm->interval_tree,
					       start, last);
}
EXPORT_SYMBOL(drm_mm_interval_first);

struct drm_mm_node *
drm_mm_interval_next(struct drm_mm_node *node, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_next(node, start, last);
}
EXPORT_SYMBOL(drm_mm_interval_next);

static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;

	node->__subtree_last = LAST(node);

	if (hole_node->allocated) {
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_node;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start)
			link = &parent->rb.rb_left;
		else
			link = &parent->rb.rb_right;
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented(&node->rb,
			    &mm->interval_tree,
			    &drm_mm_interval_tree_augment);
}

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 u64 size, unsigned alignment,
				 unsigned long color,
				 enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	BUG_ON(adj_start < hole_start);
	BUG_ON(adj_end > hole_end);

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	list_add(&node->node_list, &hole_node->node_list);

	drm_mm_interval_tree_add_node(hole_node, node);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. This is useful
 * to initialize the allocator with preallocated objects which must be set-up
 * before the range allocator can be set-up, e.g. when taking over a firmware
 * framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	u64 end = node->start + node->size;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;

	if (WARN_ON(node->size == 0))
		return -EINVAL;

	/* Find the relevant hole to add our node to */
	hole = drm_mm_interval_tree_iter_first(&mm->interval_tree,
					       node->start, ~(u64)0);
	if (hole) {
		if (hole->start < end)
			return -ENOSPC;
	} else {
		hole = list_entry(&mm->head_node.node_list,
				  typeof(*hole), node_list);
	}

	hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
	if (!hole->hole_follows)
		return -ENOSPC;

	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);
	if (hole_start > node->start || hole_end < end)
		return -ENOSPC;

	node->mm = mm;
	node->allocated = 1;

	list_add(&node->node_list, &hole->node_list);

	drm_mm_interval_tree_add_node(hole, node);

	if (node->start == hole_start) {
		hole->hole_follows = 0;
		list_del(&hole->hole_stack);
	}

	node->hole_follows = 0;
	if (end != hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
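
/*
 * Illustrative sketch of the firmware-takeover case described above; the
 * my_reserve_fw_fb() helper and the fb_base/fb_size values are made up and
 * would come from the inherited display configuration in a real driver:
 *
 *	static int my_reserve_fw_fb(struct drm_mm *mm, struct drm_mm_node *node,
 *				    u64 fb_base, u64 fb_size)
 *	{
 *		memset(node, 0, sizeof(*node));
 *		node->start = fb_base;
 *		node->size = fb_size;
 *		node->color = 0;
 *		return drm_mm_reserve_node(mm, node);
 *	}
 */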

/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       u64 size, unsigned alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	if (WARN_ON(size == 0))
		return -EINVAL;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);
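
/*
 * Illustrative sketch: a top-down allocation of 1 MiB aligned to 64 KiB. The
 * flag pairing and the sizes are arbitrary example choices, and the node
 * would normally live in a longer-lived driver structure rather than on the
 * stack. On -ENOSPC a driver would typically fall back to the eviction
 * helpers further below:
 *
 *	struct drm_mm_node node;
 *	int ret;
 *
 *	memset(&node, 0, sizeof(node));
 *	ret = drm_mm_insert_node_generic(mm, &node,
 *					 1024 * 1024, 64 * 1024, 0,
 *					 DRM_MM_SEARCH_BELOW,
 *					 DRM_MM_CREATE_TOP);
 */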

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       u64 size, unsigned alignment,
				       unsigned long color,
				       u64 start, u64 end,
				       enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	list_add(&node->node_list, &hole_node->node_list);

	drm_mm_interval_tree_add_node(hole_node, node);

	BUG_ON(node->start < start);
	BUG_ON(node->start < adj_start);
	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					u64 size, unsigned alignment,
					unsigned long color,
					u64 start, u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	if (WARN_ON(size == 0))
		return -EINVAL;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
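
/*
 * Illustrative sketch: the same kind of insertion, but restricted to the
 * first 256 MiB of the managed range, e.g. for a unit that can only address
 * a subset of the address space. The bound is an arbitrary example value and
 * the same caveats as in the sketch above apply:
 *
 *	memset(&node, 0, sizeof(node));
 *	ret = drm_mm_insert_node_in_range_generic(mm, &node, size, 0, 0,
 *						  0, 256ULL << 20,
 *						  DRM_MM_SEARCH_DEFAULT,
 *						  DRM_MM_CREATE_DEFAULT);
 */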

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need to
 * be cleared again before it can be re-inserted into this or any other drm_mm
 * allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	if (WARN_ON(!node->allocated))
		return;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(__drm_mm_hole_node_start(node) ==
		       __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(__drm_mm_hole_node_start(node) !=
		       __drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	drm_mm_interval_tree_remove(node, &mm->interval_tree);
	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		u64 tmp = start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem)
			start += alignment - rem;
	}

	return end >= start + size;
}

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						      u64 size,
						      unsigned alignment,
						      unsigned long color,
						      enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							u64 size,
							unsigned alignment,
							unsigned long color,
							u64 start,
							u64 end,
							enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (adj_start < start)
			adj_start = start;
		if (adj_end > end)
			adj_end = end;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of remove
 * and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;
	new->__subtree_last = old->__subtree_last;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have continuous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not very
 * efficient to simply select objects from the tail of an LRU until there's a
 * suitable hole: Especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_init_scan() or drm_mm_init_scan_with_range(). The driver then adds
 * objects to the roster (probably by walking an LRU list, but this can be
 * freely implemented) until a suitable hole is found or there's no further
 * evictable object.
 *
 * The driver must then walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected in the scan. Adding and
 * removing an object is O(1), and since freeing a node is also O(1) the overall
 * complexity is O(scanned_objects). So like the free stack which needs to be
 * walked before a scan operation even begins this is linear in the number of
 * objects. It doesn't seem to hurt badly.
 */
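
/*
 * Illustrative sketch of the scan flow described above; struct my_obj, its
 * lru/scan list_heads and my_obj_evict() are made-up names, and
 * my_obj_evict() is assumed to end up calling drm_mm_remove_node() on the
 * object's node:
 *
 *	struct my_obj *obj, *next;
 *	LIST_HEAD(scan_list);
 *	bool found = false;
 *
 *	drm_mm_init_scan(mm, size, 0, 0);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->scan_link, &scan_list);
 *		if (drm_mm_scan_add_block(&obj->node)) {
 *			found = true;
 *			break;
 *		}
 *	}
 *
 *	Because list_add() inserts at the head, walking scan_list from the
 *	head visits the nodes in reverse order of addition, which is exactly
 *	what drm_mm_scan_remove_block() requires. Objects that don't need to
 *	be evicted are dropped from the list here:
 *
 *	list_for_each_entry_safe(obj, next, &scan_list, scan_link) {
 *		if (!drm_mm_scan_remove_block(&obj->node) || !found)
 *			list_del_init(&obj->scan_link);
 *	}
 *
 *	Only now, with the scan list fully unwound, may the selected objects
 *	actually be evicted:
 *
 *	list_for_each_entry_safe(obj, next, &scan_list, scan_link) {
 *		list_del_init(&obj->scan_link);
 *		my_obj_evict(obj);
 *	}
 */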

/**
 * drm_mm_init_scan - initialize lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. Note that there's no need to specify allocation flags, since they only
 * change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      u64 size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. Note that there's no need to specify allocation flags, since they only
 * change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 u64 size,
				 unsigned alignment,
				 unsigned long color,
				 u64 start,
				 u64 end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return true;
	}

	return false;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @node: drm_mm_node to remove
 *
 * Nodes _must_ be removed from the scan list in exactly the reverse order in
 * which they have been added, otherwise the internal state of the memory
 * manager will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 * return the just freed block (because it's at the top of the free_stack list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
bool drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	mm->scanned_blocks = 0;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->interval_tree = RB_ROOT;

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);
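
/*
 * Illustrative lifecycle sketch: managing a 256 MiB range. The embedding
 * structure (dev_priv here, a made-up name) is assumed to be zero-allocated,
 * so the drm_mm is already cleared as required:
 *
 *	drm_mm_init(&dev_priv->vram_mm, 0, 256ULL << 20);
 *	... insert, reserve and remove nodes at runtime ...
 *	drm_mm_takedown(&dev_priv->vram_mm);	(all nodes must be gone by now)
 */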

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	WARN(!list_empty(&mm->head_node.node_list),
	     "Memory manager not clean during takedown.\n");
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
				     const char *prefix)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
			 hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_debug_table - dump allocator state to dmesg
 * @mm: drm_mm allocator to dump
 * @prefix: prefix to use for dumping to dmesg
 */
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_debug_hole(&mm->head_node, prefix);

	drm_mm_for_each_node(entry, mm) {
		pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
			 entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_debug_hole(entry, prefix);
	}
	total = total_free + total_used;

	pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
		 total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		seq_printf(m, "%#018llx-%#018llx: %llu: free\n", hole_start,
			   hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_dump_table - dump allocator state to a seq_file
 * @m: seq_file to dump to
 * @mm: drm_mm allocator to dump
 */
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(m, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(m, entry);
	}
	total = total_free + total_used;

	seq_printf(m, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif