/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include <linux/interval_tree_generic.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. The drivers are free to use the
 * resource allocator from the linux core if it suits them, the upside of drm_mm
 * is that it's in the DRM core. Which means that it's easier to extend for
 * some of the crazier special purpose needs of gpus.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * datastructures. drm_mm itself will not do any memory allocations of its own,
 * so if drivers choose not to embed nodes they still need to allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This is
 * useful for taking over initial mode setting configurations from the firmware,
 * where an object needs to be created which exactly matches the firmware's
 * scanout target. As long as the range is still free it can be inserted anytime
 * after the allocator is initialized, which helps with avoiding looped
 * dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since gfx thrashing is a
 * fairly steep performance cliff anyway, this is not a real concern. Removing
 * a node again is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be used
 * to implement sophisticated placement restrictions. The i915 DRM driver uses
 * this to implement guard pages between incompatible caching domains in the
 * graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging.
 *
 * Note that this range allocator is not thread-safe, drivers need to protect
 * modifications with their own locking. The idea behind this is that for a full
 * memory manager additional data needs to be protected anyway, hence internal
 * locking would be fully redundant.
 */
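
/*
 * Example: a minimal usage sketch. The driver structures and functions below
 * are hypothetical, shown only to illustrate embedding &drm_mm and
 * &drm_mm_node instead of allocating them separately:
 *
 *	struct my_vram_manager {
 *		struct drm_mm mm;
 *		struct mutex lock;	// drm_mm itself is not thread-safe
 *	};
 *
 *	struct my_buffer {
 *		struct drm_mm_node node;	// must be zeroed before insert
 *	};
 *
 *	static int my_buffer_pin(struct my_vram_manager *vram,
 *				 struct my_buffer *buf, u64 size)
 *	{
 *		int err;
 *
 *		mutex_lock(&vram->lock);
 *		err = drm_mm_insert_node(&vram->mm, &buf->node, size);
 *		mutex_unlock(&vram->lock);
 *		return err;
 *	}
 *
 *	static void my_buffer_unpin(struct my_vram_manager *vram,
 *				    struct my_buffer *buf)
 *	{
 *		mutex_lock(&vram->lock);
 *		drm_mm_remove_node(&buf->node);
 *		mutex_unlock(&vram->lock);
 *	}
 */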

#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	struct stack_trace trace = {
		.entries = entries,
		.max_entries = STACKDEPTH,
		.skip = 1
	};

	save_stack_trace(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	/* May be called under spinlock, so avoid sleeping */
	node->stack = depot_save_stack(&trace, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	unsigned long entries[STACKDEPTH];
	char *buf;

	buf = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
		struct stack_trace trace = {
			.entries = entries,
			.max_entries = STACKDEPTH
		};

		if (!node->stack) {
			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		depot_fetch_stack(node->stack, &trace);
		snprint_stack_trace(buf, BUFSZ, &trace, 0);
		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
					       start, last) ?: (struct drm_mm_node *)&mm->head_node;
}
EXPORT_SYMBOL(__drm_mm_interval_first);
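
/*
 * The interval tree indexes all allocated nodes by [start, last] and backs
 * the drm_mm_for_each_node_in_range() helper in drm_mm.h. A sketch of
 * checking a range for conflicting allocations (mm, start and end supplied
 * by the caller):
 *
 *	struct drm_mm_node *node;
 *
 *	drm_mm_for_each_node_in_range(node, mm, start, end)
 *		pr_info("conflict: [%llx + %llx]\n", node->start, node->size);
 */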

static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;
	bool leftmost = true;

	node->__subtree_last = LAST(node);

	if (hole_node->allocated) {
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
		leftmost = false;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_root.rb_node;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start) {
			link = &parent->rb.rb_left;
		} else {
			link = &parent->rb.rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
				   &drm_mm_interval_tree_augment);
}

#define RB_INSERT(root, member, expr) do { \
	struct rb_node **link = &root.rb_node, *rb = NULL; \
	u64 x = expr(node); \
	while (*link) { \
		rb = *link; \
		if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
			link = &rb->rb_left; \
		else \
			link = &rb->rb_right; \
	} \
	rb_link_node(&node->member, rb, link); \
	rb_insert_color(&node->member, &root); \
} while (0)

#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))

static void add_hole(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	node->hole_size =
		__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	RB_INSERT(mm->holes_size, rb_hole_size, HOLE_SIZE);
	RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);

	list_add(&node->hole_stack, &mm->hole_stack);
}

static void rm_hole(struct drm_mm_node *node)
{
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	list_del(&node->hole_stack);
	rb_erase(&node->rb_hole_size, &node->mm->holes_size);
	rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
	node->hole_size = 0;

	DRM_MM_BUG_ON(drm_mm_hole_follows(node));
}

static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
}

static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
}

static inline u64 rb_hole_size(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}

static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
{
	struct rb_node *best = NULL;
	struct rb_node **link = &mm->holes_size.rb_node;

	while (*link) {
		struct rb_node *rb = *link;

		if (size <= rb_hole_size(rb)) {
			link = &rb->rb_left;
			best = rb;
		} else {
			link = &rb->rb_right;
		}
	}

	return rb_hole_size_to_node(best);
}

static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
{
	struct drm_mm_node *node = NULL;
	struct rb_node **link = &mm->holes_addr.rb_node;

	while (*link) {
		u64 hole_start;

		node = rb_hole_addr_to_node(*link);
		hole_start = __drm_mm_hole_node_start(node);

		if (addr < hole_start)
			link = &node->rb_hole_addr.rb_left;
		else if (addr > hole_start + node->hole_size)
			link = &node->rb_hole_addr.rb_right;
		else
			break;
	}

	return node;
}

static struct drm_mm_node *
first_hole(struct drm_mm *mm,
	   u64 start, u64 end, u64 size,
	   enum drm_mm_insert_mode mode)
{
	if (RB_EMPTY_ROOT(&mm->holes_size))
		return NULL;

	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return best_hole(mm, size);

	case DRM_MM_INSERT_LOW:
		return find_hole(mm, start);

	case DRM_MM_INSERT_HIGH:
		return find_hole(mm, end);

	case DRM_MM_INSERT_EVICT:
		return list_first_entry_or_null(&mm->hole_stack,
						struct drm_mm_node,
						hole_stack);
	}
}

static struct drm_mm_node *
next_hole(struct drm_mm *mm,
	  struct drm_mm_node *node,
	  enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return rb_hole_size_to_node(rb_next(&node->rb_hole_size));

	case DRM_MM_INSERT_LOW:
		return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));

	case DRM_MM_INSERT_HIGH:
		return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));

	case DRM_MM_INSERT_EVICT:
		node = list_next_entry(node, hole_stack);
		return &node->hole_stack == &mm->hole_stack ? NULL : node;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up &drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. All other
 * fields must be cleared to 0. This is useful to initialize the allocator with
 * preallocated objects which must be set-up before the range allocator can be
 * set-up, e.g. when taking over a firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	u64 end = node->start + node->size;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	if (unlikely(end <= node->start))
		return -ENOSPC;

	/* Find the relevant hole to add our node to */
	hole = find_hole(mm, node->start);
	if (!hole)
		return -ENOSPC;

	adj_start = hole_start = __drm_mm_hole_node_start(hole);
	adj_end = hole_end = hole_start + hole->hole_size;

	if (mm->color_adjust)
		mm->color_adjust(hole, node->color, &adj_start, &adj_end);

	if (adj_start > node->start || adj_end < end)
		return -ENOSPC;

	node->mm = mm;

	list_add(&node->node_list, &hole->node_list);
	drm_mm_interval_tree_add_node(hole, node);
	node->allocated = true;
	node->hole_size = 0;

	rm_hole(hole);
	if (node->start > hole_start)
		add_hole(hole);
	if (end < hole_end)
		add_hole(node);

	save_stack(node);
	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
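
/*
 * Example: a sketch of claiming the firmware-initialized scanout buffer during
 * driver load, so that later allocations cannot land on top of it. The
 * fb_base/fb_size values are hypothetical, as reported by the firmware:
 *
 *	struct drm_mm_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
 *	int err = -ENOMEM;
 *
 *	if (node) {
 *		node->start = fb_base;
 *		node->size = fb_size;
 *		err = drm_mm_reserve_node(&mm, node);
 *		if (err)	// -ENOSPC: the range is already in use
 *			kfree(node);
 *	}
 */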

/**
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @range_start: start of the allowed range for this node
 * @range_end: end of the allowed range for this node
 * @mode: fine-tune the allocation search and placement
 *
 * The preallocated @node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range(struct drm_mm * const mm,
				struct drm_mm_node * const node,
				u64 size, u64 alignment,
				unsigned long color,
				u64 range_start, u64 range_end,
				enum drm_mm_insert_mode mode)
{
	struct drm_mm_node *hole;
	u64 remainder_mask;

	DRM_MM_BUG_ON(range_start >= range_end);

	if (unlikely(size == 0 || range_end - range_start < size))
		return -ENOSPC;

	if (alignment <= 1)
		alignment = 0;

	remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	for (hole = first_hole(mm, range_start, range_end, size, mode); hole;
	     hole = next_hole(mm, hole, mode)) {
		u64 hole_start = __drm_mm_hole_node_start(hole);
		u64 hole_end = hole_start + hole->hole_size;
		u64 adj_start, adj_end;
		u64 col_start, col_end;

		if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
			break;

		if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
			break;

		col_start = hole_start;
		col_end = hole_end;
		if (mm->color_adjust)
			mm->color_adjust(hole, color, &col_start, &col_end);

		adj_start = max(col_start, range_start);
		adj_end = min(col_end, range_end);

		if (adj_end <= adj_start || adj_end - adj_start < size)
			continue;

		if (mode == DRM_MM_INSERT_HIGH)
			adj_start = adj_end - size;

		if (alignment) {
			u64 rem;

			if (likely(remainder_mask))
				rem = adj_start & remainder_mask;
			else
				div64_u64_rem(adj_start, alignment, &rem);
			if (rem) {
				adj_start -= rem;
				if (mode != DRM_MM_INSERT_HIGH)
					adj_start += alignment;

				if (adj_start < max(col_start, range_start) ||
				    min(col_end, range_end) - adj_start < size)
					continue;

				if (adj_end <= adj_start ||
				    adj_end - adj_start < size)
					continue;
			}
		}

		node->mm = mm;
		node->size = size;
		node->start = adj_start;
		node->color = color;
		node->hole_size = 0;

		list_add(&node->node_list, &hole->node_list);
		drm_mm_interval_tree_add_node(hole, node);
		node->allocated = true;

		rm_hole(hole);
		if (adj_start > hole_start)
			add_hole(hole);
		if (adj_start + size < hole_end)
			add_hole(node);

		save_stack(node);
		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
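
/*
 * Example: a sketch of a 64 KiB aligned allocation placed top-down within the
 * low 4 GiB of the managed range (size and limits hypothetical):
 *
 *	// place top-down, 64KiB-aligned, within the low 4GiB
 *	err = drm_mm_insert_node_in_range(&mm, &node, size, SZ_64K, 0,
 *					  0, 1ull << 32, DRM_MM_INSERT_HIGH);
 *	if (err == -ENOSPC) {
 *		// no fitting hole; evict (see the lru scan roster below)
 *	}
 */
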
/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need to
 * be cleared again before it can be re-inserted into this or any other drm_mm
 * allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block);

	prev_node = list_prev_entry(node, node_list);

	if (drm_mm_hole_follows(node))
		rm_hole(node);

	drm_mm_interval_tree_remove(node, &mm->interval_tree);
	list_del(&node->node_list);
	node->allocated = false;

	if (drm_mm_hole_follows(prev_node))
		rm_hole(prev_node);
	add_hole(prev_node);
}
EXPORT_SYMBOL(drm_mm_remove_node);

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful for when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of remove
 * and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	DRM_MM_BUG_ON(!old->allocated);

	*new = *old;

	list_replace(&old->node_list, &new->node_list);
	rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree.rb_root);

	if (drm_mm_hole_follows(old)) {
		list_replace(&old->hole_stack, &new->hole_stack);
		rb_replace_node(&old->rb_hole_size,
				&new->rb_hole_size,
				&old->mm->holes_size);
		rb_replace_node(&old->rb_hole_addr,
				&new->rb_hole_addr,
				&old->mm->holes_addr);
	}

	old->allocated = false;
	new->allocated = true;
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have continuous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not very
 * efficient to simply select objects from the tail of an LRU until there's a
 * suitable hole: especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
 * objects to the roster, probably by walking an LRU list, but this can be
 * freely implemented. Eviction candidates are added using
 * drm_mm_scan_add_block() until a suitable hole is found or there are no
 * further evictable objects. Eviction roster metadata is tracked in &struct
 * drm_mm_scan.
 *
 * The driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is in
 * scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected (drm_mm_scan_remove_block()
 * reported true) in the scan, and any overlapping nodes after color adjustment
 * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and
 * since freeing a node is also O(1) the overall complexity is
 * O(scanned_objects). Like the free stack, which needs to be walked before a
 * scan operation even begins, this is linear in the number of objects. It
 * doesn't seem to hurt too badly. A sketch of the full loop follows below.
 */
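
/*
 * Example: a sketch of a full eviction loop following the steps above. The
 * object type, its lru/evict list links and the eviction helpers are
 * hypothetical driver code:
 *
 *	static int evict_for(struct drm_mm *mm, struct list_head *lru,
 *			     u64 size, u64 alignment, unsigned long color)
 *	{
 *		struct drm_mm_scan scan;
 *		struct drm_mm_node *node;
 *		struct my_obj *obj, *next;
 *		LIST_HEAD(evict_list);
 *		int ret = -ENOSPC;
 *
 *		drm_mm_scan_init(&scan, mm, size, alignment, color,
 *				 DRM_MM_INSERT_EVICT);
 *		list_for_each_entry(obj, lru, lru_link) {
 *			// prepend, so walking evict_list forward later is
 *			// exactly the reverse of the order of addition
 *			list_add(&obj->evict_link, &evict_list);
 *			if (drm_mm_scan_add_block(&scan, &obj->node)) {
 *				ret = 0;
 *				break;
 *			}
 *		}
 *
 *		// mandatory: restore the allocator state for every block
 *		list_for_each_entry_safe(obj, next, &evict_list, evict_link) {
 *			if (!drm_mm_scan_remove_block(&scan, &obj->node))
 *				list_del(&obj->evict_link);	// keep it
 *		}
 *		if (ret)
 *			return ret;	// no hole found
 *
 *		list_for_each_entry_safe(obj, next, &evict_list, evict_link)
 *			my_obj_evict(obj);	// frees obj->node
 *
 *		while ((node = drm_mm_scan_color_evict(&scan)))
 *			my_node_evict(node);
 *
 *		return 0;
 *	}
 */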

/**
 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 * @mode: fine-tune the allocation search and placement
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size,
				 u64 alignment,
				 unsigned long color,
				 u64 start,
				 u64 end,
				 enum drm_mm_insert_mode mode)
{
	DRM_MM_BUG_ON(start >= end);
	DRM_MM_BUG_ON(!size || size > end - start);
	DRM_MM_BUG_ON(mm->scan_active);

	scan->mm = mm;

	if (alignment <= 1)
		alignment = 0;

	scan->color = color;
	scan->alignment = alignment;
	scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	scan->size = size;
	scan->mode = mode;

	scan->range_start = start;
	scan->range_end = end;

	scan->hit_start = U64_MAX;
	scan->hit_end = 0;
}
EXPORT_SYMBOL(drm_mm_scan_init_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 col_start, col_end;
	u64 adj_start, adj_end;

	DRM_MM_BUG_ON(node->mm != mm);
	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block);
	node->scanned_block = true;
	mm->scan_active++;

	/* Remove this block from the node_list so that we enlarge the hole
	 * (distance between the end of our previous node and the start of
	 * our next), without poisoning the link so that we can restore it
	 * later in drm_mm_scan_remove_block().
	 */
	hole = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
	__list_del_entry(&node->node_list);

	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);

	col_start = hole_start;
	col_end = hole_end;
	if (mm->color_adjust)
		mm->color_adjust(hole, scan->color, &col_start, &col_end);

	adj_start = max(col_start, scan->range_start);
	adj_end = min(col_end, scan->range_end);
	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
		return false;

	if (scan->mode == DRM_MM_INSERT_HIGH)
		adj_start = adj_end - scan->size;

	if (scan->alignment) {
		u64 rem;

		if (likely(scan->remainder_mask))
			rem = adj_start & scan->remainder_mask;
		else
			div64_u64_rem(adj_start, scan->alignment, &rem);
		if (rem) {
			adj_start -= rem;
			if (scan->mode != DRM_MM_INSERT_HIGH)
				adj_start += scan->alignment;
			if (adj_start < max(col_start, scan->range_start) ||
			    min(col_end, scan->range_end) - adj_start < scan->size)
				return false;

			if (adj_end <= adj_start ||
			    adj_end - adj_start < scan->size)
				return false;
		}
	}

	scan->hit_start = adj_start;
	scan->hit_end = adj_start + scan->size;

	DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
	DRM_MM_BUG_ON(scan->hit_start < hole_start);
	DRM_MM_BUG_ON(scan->hit_end > hole_end);

	return true;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to remove
 *
 * Nodes **must** be removed in exactly the reverse order from the scan list as
 * they have been added (e.g. using list_add() as they are added and then
 * list_for_each() over that eviction list to remove), otherwise the internal
 * state of the memory manager will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_insert_node_in_range() with
 * DRM_MM_INSERT_EVICT will then return the just freed block (because it is at
 * the top of the hole_stack list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node)
{
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(node->mm != scan->mm);
	DRM_MM_BUG_ON(!node->scanned_block);
	node->scanned_block = false;

	DRM_MM_BUG_ON(!node->mm->scan_active);
	node->mm->scan_active--;

	/* During drm_mm_scan_add_block() we decoupled this node leaving
	 * its pointers intact. Now that the caller is walking back along
	 * the eviction list we can restore this block into its rightful
	 * place on the full node_list. To confirm that the caller is walking
	 * backwards correctly we check that prev_node->next == node->next,
	 * i.e. both believe the same node should be on the other side of the
	 * hole.
	 */
	prev_node = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
		      list_next_entry(node, node_list));
	list_add(&node->node_list, &prev_node->node_list);

	return (node->start + node->size > scan->hit_start &&
		node->start < scan->hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
 * @scan: drm_mm scan with target hole
 *
 * After completing an eviction scan and removing the selected nodes, we may
 * need to remove a few more nodes from either side of the target hole if
 * mm.color_adjust is being used.
 *
 * Returns:
 * A node to evict, or NULL if there are no overlapping nodes.
 */
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;

	DRM_MM_BUG_ON(list_empty(&mm->hole_stack));

	if (!mm->color_adjust)
		return NULL;

	hole = list_first_entry(&mm->hole_stack, typeof(*hole), hole_stack);
	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = hole_start + hole->hole_size;

	DRM_MM_BUG_ON(hole_start > scan->hit_start);
	DRM_MM_BUG_ON(hole_end < scan->hit_end);

	mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
	if (hole_start > scan->hit_start)
		return hole;
	if (hole_end < scan->hit_end)
		return list_next_entry(hole, node_list);

	return NULL;
}
EXPORT_SYMBOL(drm_mm_scan_color_evict);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	DRM_MM_BUG_ON(start + size <= start);

	mm->color_adjust = NULL;

	INIT_LIST_HEAD(&mm->hole_stack);
	mm->interval_tree = RB_ROOT_CACHED;
	mm->holes_size = RB_ROOT;
	mm->holes_addr = RB_ROOT;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	mm->head_node.allocated = false;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = -size;
	add_hole(&mm->head_node);

	mm->scan_active = 0;
}
EXPORT_SYMBOL(drm_mm_init);
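
/*
 * Example: drm_mm_init() leaves @color_adjust NULL; a driver that needs guard
 * pages between nodes of different caching domains can install a callback
 * right after initialization. A sketch, with GUARD_PAGE_SIZE and the domain
 * encoding in @color being hypothetical (the callback is passed the node
 * preceding the hole):
 *
 *	static void my_color_adjust(const struct drm_mm_node *node,
 *				    unsigned long color,
 *				    u64 *start, u64 *end)
 *	{
 *		// shrink the hole if a neighbour has a different color
 *		if (node->allocated && node->color != color)
 *			*start += GUARD_PAGE_SIZE;
 *
 *		node = list_next_entry(node, node_list);
 *		if (node->allocated && node->color != color)
 *			*end -= GUARD_PAGE_SIZE;
 *	}
 *
 *	drm_mm_init(&mm, 0, vram_size);
 *	mm.color_adjust = my_color_adjust;
 */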

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	if (WARN(!drm_mm_clean(mm),
		 "Memory manager not clean during takedown.\n"))
		show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
{
	u64 start, size;

	size = entry->hole_size;
	if (size) {
		start = drm_mm_hole_node_start(entry);
		drm_printf(p, "%#018llx-%#018llx: %llu: free\n",
			   start, start + size, size);
	}

	return size;
}

/**
 * drm_mm_print - print allocator state
 * @mm: drm_mm allocator to print
 * @p: DRM printer to use
 */
void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
{
	const struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(p, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(p, entry);
	}

	total = total_free + total_used;

	drm_printf(p, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_print);
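
/*
 * Example: a sketch of wiring drm_mm_print() into a debugfs file via the
 * standard seq_file printer (my_mm is hypothetical):
 *
 *	static int my_mm_show(struct seq_file *m, void *data)
 *	{
 *		struct drm_printer p = drm_seq_file_printer(m);
 *
 *		drm_mm_print(&my_mm, &p);
 *		return 0;
 *	}
 */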