/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved if
 * an RB-tree were used instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include <linux/interval_tree_generic.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. Drivers are free to use the
 * resource allocator from the Linux core if it suits them, but the upside of
 * drm_mm is that it lives in the DRM core, which makes it easier to extend for
 * some of the crazier special-purpose needs of GPUs.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * datastructures. drm_mm itself will not do any allocations of its own, so if
 * drivers choose not to embed nodes they still need to allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This is
 * useful for taking over initial mode setting configurations from the firmware,
 * where an object needs to be created which exactly matches the firmware's
 * scanout target. As long as the range is still free it can be inserted anytime
 * after the allocator is initialized, which helps with avoiding looped
 * dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since gfx thrashing is a
 * fairly steep performance cliff anyway it is not a real concern. Removing a
 * node again is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be used
 * to implement sophisticated placement restrictions. The i915 DRM driver uses
 * this to implement guard pages between incompatible caching domains in the
 * graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally, iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging.
 */
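
/*
 * A minimal usage sketch (illustrative only, not taken from a real driver;
 * my_buffer, buf and mm are made-up names, and error handling is elided).
 * The allocator must be cleared to 0 before drm_mm_init() and here manages
 * a 256 MiB range starting at offset 0; the insert asks for 8 KiB with
 * 4 KiB alignment using the default bottom-up first-fit search:
 *
 *	struct my_buffer {
 *		struct drm_mm_node node;
 *	};
 *
 *	static struct drm_mm mm;
 *
 *	drm_mm_init(&mm, 0, 256 << 20);
 *
 *	struct my_buffer *buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 *	drm_mm_insert_node_generic(&mm, &buf->node, 8192, 4096, 0,
 *				   DRM_MM_SEARCH_DEFAULT,
 *				   DRM_MM_CREATE_DEFAULT);
 *
 *	drm_mm_remove_node(&buf->node);
 *	kfree(buf);
 *	drm_mm_takedown(&mm);
 */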

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						u64 size,
						u64 alignment,
						unsigned long color,
						enum drm_mm_search_flags flags);
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
						u64 size,
						u64 alignment,
						unsigned long color,
						u64 start,
						u64 end,
						enum drm_mm_search_flags flags);

#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	struct stack_trace trace = {
		.entries = entries,
		.max_entries = STACKDEPTH,
		.skip = 1
	};

	save_stack_trace(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	/* May be called under spinlock, so avoid sleeping */
	node->stack = depot_save_stack(&trace, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	unsigned long entries[STACKDEPTH];
	char *buf;

	buf = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
		struct stack_trace trace = {
			.entries = entries,
			.max_entries = STACKDEPTH
		};

		if (!node->stack) {
			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		depot_fetch_stack(node->stack, &trace);
		snprint_stack_trace(buf, BUFSZ, &trace, 0);
		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_first((struct rb_root *)&mm->interval_tree,
					       start, last);
}
EXPORT_SYMBOL(__drm_mm_interval_first);

static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;

	node->__subtree_last = LAST(node);

	if (hole_node->allocated) {
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_node;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start)
			link = &parent->rb.rb_left;
		else
			link = &parent->rb.rb_right;
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented(&node->rb,
			    &mm->interval_tree,
			    &drm_mm_interval_tree_augment);
}

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 u64 size, u64 alignment,
				 unsigned long color,
				 enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	DRM_MM_BUG_ON(node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 rem;

		div64_u64_rem(adj_start, alignment, &rem);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	DRM_MM_BUG_ON(adj_start < hole_start);
	DRM_MM_BUG_ON(adj_end > hole_end);

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	list_add(&node->node_list, &hole_node->node_list);

	drm_mm_interval_tree_add_node(hole_node, node);

	DRM_MM_BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}

	save_stack(node);
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. This is useful
 * to initialize the allocator with preallocated objects which must be set-up
 * before the range allocator can be set-up, e.g. when taking over a firmware
 * framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	u64 end = node->start + node->size;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	if (unlikely(end <= node->start))
		return -ENOSPC;

	/* Find the relevant hole to add our node to */
	hole = drm_mm_interval_tree_iter_first(&mm->interval_tree,
					       node->start, ~(u64)0);
	if (hole) {
		if (hole->start < end)
			return -ENOSPC;
	} else {
		hole = list_entry(drm_mm_nodes(mm), typeof(*hole), node_list);
	}

	hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
	if (!hole->hole_follows)
		return -ENOSPC;

	adj_start = hole_start = __drm_mm_hole_node_start(hole);
	adj_end = hole_end = __drm_mm_hole_node_end(hole);

	if (mm->color_adjust)
		mm->color_adjust(hole, node->color, &adj_start, &adj_end);

	if (adj_start > node->start || adj_end < end)
		return -ENOSPC;

	node->mm = mm;
	node->allocated = 1;

	list_add(&node->node_list, &hole->node_list);

	drm_mm_interval_tree_add_node(hole, node);

	if (node->start == hole_start) {
		hole->hole_follows = 0;
		list_del(&hole->hole_stack);
	}

	node->hole_follows = 0;
	if (end != hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}

	save_stack(node);

	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
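
/*
 * A sketch of the firmware-framebuffer take-over mentioned above (fb_base,
 * fb_size, fb_node and ret are made-up names; fb_node is assumed to have
 * been cleared to 0 and further error handling is elided):
 *
 *	fb_node.start = fb_base;
 *	fb_node.size = fb_size;
 *	ret = drm_mm_reserve_node(&mm, &fb_node);
 *	if (ret)
 *		return ret;
 */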

/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       u64 size, u64 alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	if (WARN_ON(size == 0))
		return -EINVAL;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);
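
/*
 * For a top-down allocation (a sketch, not taken from a real driver) the
 * search and allocator flags are combined: DRM_MM_SEARCH_BELOW scans the
 * free holes in reverse, and DRM_MM_CREATE_TOP places the node at the top
 * end of the hole that is found:
 *
 *	ret = drm_mm_insert_node_generic(&mm, &node, size, alignment, color,
 *					 DRM_MM_SEARCH_BELOW,
 *					 DRM_MM_CREATE_TOP);
 */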

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       u64 size, u64 alignment,
				       unsigned long color,
				       u64 start, u64 end,
				       enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	DRM_MM_BUG_ON(!hole_node->hole_follows || node->allocated);

	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 rem;

		div64_u64_rem(adj_start, alignment, &rem);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	list_add(&node->node_list, &hole_node->node_list);

	drm_mm_interval_tree_add_node(hole_node, node);

	DRM_MM_BUG_ON(node->start < start);
	DRM_MM_BUG_ON(node->start < adj_start);
	DRM_MM_BUG_ON(node->start + node->size > adj_end);
	DRM_MM_BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}

	save_stack(node);
}

/**
 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					u64 size, u64 alignment,
					unsigned long color,
					u64 start, u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	if (WARN_ON(size == 0))
		return -EINVAL;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
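
/*
 * For example (a sketch with made-up bounds), restricting an allocation to
 * the first 4 GiB of a larger address space, e.g. for an object that must
 * stay below a 32-bit hardware limit:
 *
 *	ret = drm_mm_insert_node_in_range_generic(&mm, &node, size, 0, 0,
 *						  0, 1ULL << 32,
 *						  DRM_MM_SEARCH_DEFAULT,
 *						  DRM_MM_CREATE_DEFAULT);
 */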

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need to
 * be cleared again before it can be re-inserted into this or any other drm_mm
 * allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block ||
		      node->scanned_prev_free ||
		      node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) ==
			      __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) !=
			      __drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	drm_mm_interval_tree_remove(node, &mm->interval_tree);
	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

static int check_free_hole(u64 start, u64 end, u64 size, u64 alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		u64 rem;

		div64_u64_rem(start, alignment, &rem);
		if (rem)
			start += alignment - rem;
	}

	return end >= start + size;
}

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						      u64 size,
						      u64 alignment,
						      unsigned long color,
						      enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	DRM_MM_BUG_ON(mm->scan_active);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							u64 size,
							u64 alignment,
							unsigned long color,
							u64 start,
							u64 end,
							enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	DRM_MM_BUG_ON(mm->scan_active);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (adj_start < start)
			adj_start = start;
		if (adj_end > end)
			adj_end = end;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful for when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of remove
 * and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	DRM_MM_BUG_ON(!old->allocated);

	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;
	new->__subtree_last = old->__subtree_last;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have continuous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not the most
 * efficient approach to simply select all objects from the tail of an LRU
 * until there's a suitable hole: Especially for big objects or nodes that
 * otherwise have special allocation constraints there's a good chance we evict
 * lots of (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
 * objects to the roster (probably by walking an LRU list, but this can be
 * freely implemented) until a suitable hole is found or there's no further
 * evictable object.
 *
 * The driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected in the scan. Adding and
 * removing an object is O(1), and since freeing a node is also O(1) the overall
 * complexity is O(scanned_objects). So like the free stack which needs to be
 * walked before a scan operation even begins this is linear in the number of
 * objects. It doesn't seem to hurt badly.
 */
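
/*
 * A sketch of the eviction loop described above (my_obj, lru, lru_link,
 * evict_link and my_evict() are hypothetical driver constructs; locking and
 * error handling are elided). Because list_add() prepends, iterating
 * evict_list forwards visits the nodes in exactly the reverse order they
 * were added to the scan, as drm_mm_scan_remove_block() requires:
 *
 *	struct drm_mm_scan scan;
 *	LIST_HEAD(evict_list);
 *	struct my_obj *obj, *next;
 *	bool found = false;
 *
 *	drm_mm_scan_init(&scan, &mm, size, alignment, color);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->evict_link, &evict_list);
 *		if (drm_mm_scan_add_block(&scan, &obj->node)) {
 *			found = true;
 *			break;
 *		}
 *	}
 *
 *	list_for_each_entry_safe(obj, next, &evict_list, evict_link) {
 *		if (!drm_mm_scan_remove_block(&scan, &obj->node))
 *			list_del(&obj->evict_link);
 *	}
 *
 *	if (found) {
 *		list_for_each_entry_safe(obj, next, &evict_list, evict_link)
 *			my_evict(obj);
 *	}
 */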

/**
 * drm_mm_scan_init - initialize lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. Note that there's no need to specify allocation flags, since they only
 * change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_scan_init(struct drm_mm_scan *scan,
		      struct drm_mm *mm,
		      u64 size,
		      u64 alignment,
		      unsigned long color)
{
	DRM_MM_BUG_ON(!size);
	DRM_MM_BUG_ON(mm->scan_active);

	scan->mm = mm;

	scan->color = color;
	scan->alignment = alignment;
	scan->size = size;

	scan->check_range = 0;

	scan->hit_start = U64_MAX;
	scan->hit_end = 0;

	scan->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_scan_init);

/**
 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. Note that there's no need to specify allocation flags, since they only
 * change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size,
				 u64 alignment,
				 unsigned long color,
				 u64 start,
				 u64 end)
{
	DRM_MM_BUG_ON(start >= end);
	DRM_MM_BUG_ON(!size || size > end - start);
	DRM_MM_BUG_ON(mm->scan_active);

	scan->mm = mm;

	scan->color = color;
	scan->alignment = alignment;
	scan->size = size;

	scan->range_start = start;
	scan->range_end = end;
	scan->check_range = 1;

	scan->hit_start = U64_MAX;
	scan->hit_end = 0;

	scan->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_scan_init_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *prev_node;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	DRM_MM_BUG_ON(node->mm != mm);
	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block);
	node->scanned_block = 1;
	mm->scan_active++;

	prev_node = list_prev_entry(node, node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &scan->prev_scanned_node->node_list;
	scan->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (scan->check_range) {
		if (adj_start < scan->range_start)
			adj_start = scan->range_start;
		if (adj_end > scan->range_end)
			adj_end = scan->range_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, scan->color, &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    scan->size, scan->alignment)) {
		scan->hit_start = hole_start;
		scan->hit_end = hole_end;
		return true;
	}

	return false;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to remove
 *
 * Nodes _must_ be removed in exactly the reverse order from the scan list as
 * they have been added (e.g. using list_add as they are added and then
 * list_for_each over that eviction list to remove), otherwise the internal
 * state of the memory manager will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 * return the just freed block (because it's at the top of the free_stack list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node)
{
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(node->mm != scan->mm);
	DRM_MM_BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	DRM_MM_BUG_ON(!node->mm->scan_active);
	node->mm->scan_active--;

	prev_node = list_prev_entry(node, node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > scan->hit_start &&
		node->start < scan->hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	DRM_MM_BUG_ON(start + size <= start);

	INIT_LIST_HEAD(&mm->hole_stack);
	mm->scan_active = 0;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	mm->head_node.allocated = 0;
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->interval_tree = RB_ROOT;

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	if (WARN(!drm_mm_clean(mm),
		 "Memory manager not clean during takedown.\n"))
		show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_debug_hole(const struct drm_mm_node *entry,
			     const char *prefix)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
			 hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_debug_table - dump allocator state to dmesg
 * @mm: drm_mm allocator to dump
 * @prefix: prefix to use for dumping to dmesg
 */
void drm_mm_debug_table(const struct drm_mm *mm, const char *prefix)
{
	const struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_debug_hole(&mm->head_node, prefix);

	drm_mm_for_each_node(entry, mm) {
		pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
			 entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_debug_hole(entry, prefix);
	}
	total = total_free + total_used;

	pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
		 total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static u64 drm_mm_dump_hole(struct seq_file *m, const struct drm_mm_node *entry)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		seq_printf(m, "%#018llx-%#018llx: %llu: free\n", hole_start,
			   hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_dump_table - dump allocator state to a seq_file
 * @m: seq_file to dump to
 * @mm: drm_mm allocator to dump
 */
int drm_mm_dump_table(struct seq_file *m, const struct drm_mm *mm)
{
	const struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(m, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(m, entry);
	}
	total = total_free + total_used;

	seq_printf(m, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif