/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved if
 * an RB-tree is used instead. At least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include <linux/interval_tree_generic.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. Drivers are free to use the
 * resource allocator from the linux core if it suits them, but the upside of
 * drm_mm is that it's in the DRM core, which means that it's easier to extend
 * for some of the crazier special purpose needs of gpus.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * data structures. drm_mm itself will not do any allocations of its own, so if
 * drivers choose not to embed nodes they still need to allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This is
 * useful for taking over initial mode setting configurations from the firmware,
 * where an object needs to be created which exactly matches the firmware's
 * scanout target. As long as the range is still free it can be inserted anytime
 * after the allocator is initialized, which helps with avoiding looped
 * dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic data structures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since gfx thrashing is a fairly
 * steep performance cliff anyway, it is not a real concern. Removing a node is
 * again O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be used
 * to implement sophisticated placement restrictions. The i915 DRM driver uses
 * this to implement guard pages between incompatible caching domains in the
 * graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging. A minimal usage sketch
 * follows this comment.
 */
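
/*
 * Minimal usage sketch of the above (illustrative only, not taken from a real
 * driver; the flag names are the defaults from drm_mm.h and the 1 MiB / 4 KiB
 * sizes are arbitrary):
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	drm_mm_init(&mm, 0, 0x100000);
 *	err = drm_mm_insert_node_generic(&mm, &node, 0x1000, 0, 0,
 *					 DRM_MM_SEARCH_DEFAULT,
 *					 DRM_MM_CREATE_DEFAULT);
 *	if (err == 0) {
 *		(node.start now holds the assigned offset)
 *		drm_mm_remove_node(&node);
 *	}
 *	drm_mm_takedown(&mm);
 */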

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						u64 size,
						u64 alignment,
						unsigned long color,
						enum drm_mm_search_flags flags);
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
						u64 size,
						u64 alignment,
						unsigned long color,
						u64 start,
						u64 end,
						enum drm_mm_search_flags flags);

#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	struct stack_trace trace = {
		.entries = entries,
		.max_entries = STACKDEPTH,
		.skip = 1
	};

	save_stack_trace(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	/* May be called under spinlock, so avoid sleeping */
	node->stack = depot_save_stack(&trace, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	unsigned long entries[STACKDEPTH];
	char *buf;

	buf = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
		struct stack_trace trace = {
			.entries = entries,
			.max_entries = STACKDEPTH
		};

		if (!node->stack) {
			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		depot_fetch_stack(node->stack, &trace);
		snprint_stack_trace(buf, BUFSZ, &trace, 0);
		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_first((struct rb_root *)&mm->interval_tree,
					       start, last);
}
EXPORT_SYMBOL(__drm_mm_interval_first);

static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;

	node->__subtree_last = LAST(node);

	if (hole_node->allocated) {
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_node;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start)
			link = &parent->rb.rb_left;
		else
			link = &parent->rb.rb_right;
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented(&node->rb,
			    &mm->interval_tree,
			    &drm_mm_interval_tree_augment);
}

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 u64 size, u64 alignment,
				 unsigned long color,
				 enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	DRM_MM_BUG_ON(node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 rem;

		div64_u64_rem(adj_start, alignment, &rem);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	DRM_MM_BUG_ON(adj_start < hole_start);
	DRM_MM_BUG_ON(adj_end > hole_end);

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	list_add(&node->node_list, &hole_node->node_list);

	drm_mm_interval_tree_add_node(hole_node, node);

	DRM_MM_BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}

	save_stack(node);
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. This is useful
 * to initialize the allocator with preallocated objects which must be set up
 * before the range allocator can be set up, e.g. when taking over a firmware
 * framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	u64 end = node->start + node->size;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	if (unlikely(end <= node->start))
		return -ENOSPC;

	/* Find the relevant hole to add our node to */
	hole = drm_mm_interval_tree_iter_first(&mm->interval_tree,
					       node->start, ~(u64)0);
	if (hole) {
		if (hole->start < end)
			return -ENOSPC;
	} else {
		hole = list_entry(drm_mm_nodes(mm), typeof(*hole), node_list);
	}

	hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
	if (!hole->hole_follows)
		return -ENOSPC;

	adj_start = hole_start = __drm_mm_hole_node_start(hole);
	adj_end = hole_end = __drm_mm_hole_node_end(hole);

	if (mm->color_adjust)
		mm->color_adjust(hole, node->color, &adj_start, &adj_end);

	if (adj_start > node->start || adj_end < end)
		return -ENOSPC;

	node->mm = mm;
	node->allocated = 1;

	list_add(&node->node_list, &hole->node_list);

	drm_mm_interval_tree_add_node(hole, node);

	if (node->start == hole_start) {
		hole->hole_follows = 0;
		list_del(&hole->hole_stack);
	}

	node->hole_follows = 0;
	if (end != hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}

	save_stack(node);

	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
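
/*
 * Reservation sketch (illustrative only): taking over a firmware-programmed
 * framebuffer whose placement is already fixed. fb_base and fb_size are
 * hypothetical values read from the firmware scanout configuration:
 *
 *	struct drm_mm_node fw_node = {};
 *	int err;
 *
 *	fw_node.start = fb_base;
 *	fw_node.size = fb_size;
 *	err = drm_mm_reserve_node(&mm, &fw_node);
 *	if (err)
 *		(the range is no longer free, fall back to a fresh allocation)
 */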

/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       u64 size, u64 alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	if (WARN_ON(size == 0))
		return -EINVAL;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);
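
/*
 * Top-down allocation sketch (illustrative only), e.g. to keep the low end of
 * the address space free for allocations with stricter placement rules; the
 * DRM_MM_SEARCH_BELOW hint makes the search match the top-down creation mode:
 *
 *	err = drm_mm_insert_node_generic(&mm, &node, size, alignment, 0,
 *					 DRM_MM_SEARCH_BELOW,
 *					 DRM_MM_CREATE_TOP);
 */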

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       u64 size, u64 alignment,
				       unsigned long color,
				       u64 start, u64 end,
				       enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	DRM_MM_BUG_ON(!hole_node->hole_follows || node->allocated);

	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 rem;

		div64_u64_rem(adj_start, alignment, &rem);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	list_add(&node->node_list, &hole_node->node_list);

	drm_mm_interval_tree_add_node(hole_node, node);

	DRM_MM_BUG_ON(node->start < start);
	DRM_MM_BUG_ON(node->start < adj_start);
	DRM_MM_BUG_ON(node->start + node->size > adj_end);
	DRM_MM_BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}

	save_stack(node);
}

/**
 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					u64 size, u64 alignment,
					unsigned long color,
					u64 start, u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	if (WARN_ON(size == 0))
		return -EINVAL;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
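
/*
 * Ranged allocation sketch (illustrative only): constrain the node to the
 * first 256 MiB, e.g. for a buffer that a hypothetical hardware unit can only
 * address below that boundary:
 *
 *	err = drm_mm_insert_node_in_range_generic(&mm, &node, size, 0, 0,
 *						  0, 0x10000000,
 *						  DRM_MM_SEARCH_DEFAULT,
 *						  DRM_MM_CREATE_DEFAULT);
 */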

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need to
 * be cleared again before it can be re-inserted into this or any other drm_mm
 * allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) ==
			      __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) !=
			      __drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	drm_mm_interval_tree_remove(node, &mm->interval_tree);
	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

static int check_free_hole(u64 start, u64 end, u64 size, u64 alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		u64 rem;

		div64_u64_rem(start, alignment, &rem);
		if (rem)
			start += alignment - rem;
	}

	return end >= start + size;
}

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						      u64 size,
						      u64 alignment,
						      unsigned long color,
						      enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	DRM_MM_BUG_ON(mm->scan_active);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							u64 size,
							u64 alignment,
							unsigned long color,
							u64 start,
							u64 end,
							enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	DRM_MM_BUG_ON(mm->scan_active);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (adj_start < start)
			adj_start = start;
		if (adj_end > end)
			adj_end = end;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful for when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of remove
 * and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	DRM_MM_BUG_ON(!old->allocated);

	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;
	new->__subtree_last = old->__subtree_last;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);
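
/*
 * Sketch (illustrative only): handing an allocation over between two
 * hypothetical driver objects that embed their drm_mm_node:
 *
 *	drm_mm_replace_node(&old_obj->node, &new_obj->node);
 *	(old_obj->node is now unallocated and may be reused)
 */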

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not very
 * efficient to simply pick all objects from the tail of an LRU until there's a
 * suitable hole: Especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
 * objects to the roster (probably by walking an LRU list, but this can be
 * freely implemented) until a suitable hole is found or there's no further
 * evictable object.
 *
 * The driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected in the scan. Adding and
 * removing an object is O(1), and since freeing a node is also O(1) the overall
 * complexity is O(scanned_objects). So like the free stack which needs to be
 * walked before a scan operation even begins this is linear in the number of
 * objects. It doesn't seem to hurt badly. A sketch of the full eviction loop
 * follows this comment.
 */
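
/*
 * Eviction loop sketch (illustrative only; struct my_obj, its lru list and
 * the evict_link member are hypothetical driver constructs embedding a
 * drm_mm_node):
 *
 *	struct drm_mm_scan scan;
 *	struct my_obj *obj, *next;
 *	LIST_HEAD(eviction_list);
 *	bool found = false;
 *
 *	drm_mm_scan_init_with_range(&scan, &mm, size, alignment, 0,
 *				    0, U64_MAX, 0);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->evict_link, &eviction_list);
 *		if (drm_mm_scan_add_block(&scan, &obj->node)) {
 *			found = true;
 *			break;
 *		}
 *	}
 *
 *	(list_add() prepends, so walking eviction_list front-to-back visits
 *	the scanned nodes in exactly the reverse order they were added, as
 *	required by drm_mm_scan_remove_block())
 *
 *	list_for_each_entry_safe(obj, next, &eviction_list, evict_link) {
 *		if (!drm_mm_scan_remove_block(&scan, &obj->node))
 *			list_del(&obj->evict_link);
 *	}
 *
 *	(if found is true, evicting everything left on eviction_list now
 *	opens up a hole matching the scanned constraints)
 */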

/**
 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 * @flags: flags to specify how the allocation will be performed afterwards
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size,
				 u64 alignment,
				 unsigned long color,
				 u64 start,
				 u64 end,
				 unsigned int flags)
{
	DRM_MM_BUG_ON(start >= end);
	DRM_MM_BUG_ON(!size || size > end - start);
	DRM_MM_BUG_ON(mm->scan_active);

	scan->mm = mm;

	if (alignment <= 1)
		alignment = 0;

	scan->color = color;
	scan->alignment = alignment;
	scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	scan->size = size;
	scan->flags = flags;

	scan->range_start = start;
	scan->range_end = end;

	scan->hit_start = U64_MAX;
	scan->hit_end = 0;
}
EXPORT_SYMBOL(drm_mm_scan_init_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @scan: the active drm_mm scan state
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 col_start, col_end;
	u64 adj_start, adj_end;

	DRM_MM_BUG_ON(node->mm != mm);
	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block);
	node->scanned_block = true;
	mm->scan_active++;

	/* Remove this block from the node_list so that we enlarge the hole
	 * (distance between the end of our previous node and the start of
	 * our next), without poisoning the link so that we can restore it
	 * later in drm_mm_scan_remove_block().
	 */
	hole = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
	__list_del_entry(&node->node_list);

	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);

	col_start = hole_start;
	col_end = hole_end;
	if (mm->color_adjust)
		mm->color_adjust(hole, scan->color, &col_start, &col_end);

	adj_start = max(col_start, scan->range_start);
	adj_end = min(col_end, scan->range_end);
	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
		return false;

	if (scan->flags == DRM_MM_CREATE_TOP)
		adj_start = adj_end - scan->size;

	if (scan->alignment) {
		u64 rem;

		if (likely(scan->remainder_mask))
			rem = adj_start & scan->remainder_mask;
		else
			div64_u64_rem(adj_start, scan->alignment, &rem);
		if (rem) {
			adj_start -= rem;
			if (scan->flags != DRM_MM_CREATE_TOP)
				adj_start += scan->alignment;
			if (adj_start < max(col_start, scan->range_start) ||
			    min(col_end, scan->range_end) - adj_start < scan->size)
				return false;

			if (adj_end <= adj_start ||
			    adj_end - adj_start < scan->size)
				return false;
		}
	}

	if (mm->color_adjust) {
		/* If allocations need adjusting due to neighbouring colours,
		 * we do not have enough information to decide if we need
		 * to evict nodes on either side of [adj_start, adj_end].
		 * What almost works is
		 * hit_start = adj_start + (hole_start - col_start);
		 * hit_end = adj_start + scan->size + (hole_end - col_end);
		 * but because the decision is only made on the final hole,
		 * we may underestimate the required adjustments for an
		 * interior allocation.
		 */
		scan->hit_start = hole_start;
		scan->hit_end = hole_end;
	} else {
		scan->hit_start = adj_start;
		scan->hit_end = adj_start + scan->size;
	}

	DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
	DRM_MM_BUG_ON(scan->hit_start < hole_start);
	DRM_MM_BUG_ON(scan->hit_end > hole_end);

	return true;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @scan: the active drm_mm scan state
 * @node: drm_mm_node to remove
 *
 * Nodes _must_ be removed in exactly the reverse order from the scan list as
 * they have been added (e.g. using list_add as they are added and then
 * list_for_each over that eviction list to remove), otherwise the internal
 * state of the memory manager will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 * return the just freed block (because it's at the top of the free_stack list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node)
{
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(node->mm != scan->mm);
	DRM_MM_BUG_ON(!node->scanned_block);
	node->scanned_block = false;

	DRM_MM_BUG_ON(!node->mm->scan_active);
	node->mm->scan_active--;

	/* During drm_mm_scan_add_block() we decoupled this node leaving
	 * its pointers intact. Now that the caller is walking back along
	 * the eviction list we can restore this block into its rightful
	 * place on the full node_list. To confirm that the caller is walking
	 * backwards correctly we check that prev_node->next == node->next,
	 * i.e. both believe the same node should be on the other side of the
	 * hole.
	 */
	prev_node = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
		      list_next_entry(node, node_list));
	list_add(&node->node_list, &prev_node->node_list);

	return (node->start + node->size > scan->hit_start &&
		node->start < scan->hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	DRM_MM_BUG_ON(start + size <= start);

	INIT_LIST_HEAD(&mm->hole_stack);
	mm->scan_active = 0;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	mm->head_node.allocated = 0;
	mm->head_node.hole_follows = 1;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->interval_tree = RB_ROOT;

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	if (WARN(!drm_mm_clean(mm),
		 "Memory manager not clean during takedown.\n"))
		show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_debug_hole(const struct drm_mm_node *entry,
			     const char *prefix)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
			 hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_debug_table - dump allocator state to dmesg
 * @mm: drm_mm allocator to dump
 * @prefix: prefix to use for dumping to dmesg
 */
void drm_mm_debug_table(const struct drm_mm *mm, const char *prefix)
{
	const struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_debug_hole(&mm->head_node, prefix);

	drm_mm_for_each_node(entry, mm) {
		pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
			 entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_debug_hole(entry, prefix);
	}
	total = total_free + total_used;

	pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
		 total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static u64 drm_mm_dump_hole(struct seq_file *m, const struct drm_mm_node *entry)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		seq_printf(m, "%#018llx-%#018llx: %llu: free\n", hole_start,
			   hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_dump_table - dump allocator state to a seq_file
 * @m: seq_file to dump to
 * @mm: drm_mm allocator to dump
 */
int drm_mm_dump_table(struct seq_file *m, const struct drm_mm *mm)
{
	const struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(m, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(m, entry);
	}
	total = total_free + total_used;

	seq_printf(m, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif