/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include <linux/interval_tree_generic.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. The drivers are free to use the
 * resource allocator from the linux core if it suits them, the upside of
 * drm_mm is that it's in the DRM core. Which means that it's easier to extend
 * for some of the crazier special purpose needs of gpus.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * datastructures. drm_mm itself will not do any allocations of its own, so if
 * drivers choose not to embed nodes they need to still allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This
 * is useful for taking over initial mode setting configurations from the
 * firmware, where an object needs to be created which exactly matches the
 * firmware's scanout target. As long as the range is still free it can be
 * inserted anytime after the allocator is initialized, which helps with
 * avoiding looped dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since gfx thrashing is a
 * fairly steep cliff anyway it's not a real concern. Removing a node again is
 * O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just
 * an opaque unsigned long) which in conjunction with a driver callback can be
 * used to implement sophisticated placement restrictions. The i915 DRM driver
 * uses this to implement guard pages between incompatible caching domains in
 * the graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided as
 * are some basic allocator dumpers for debugging. A minimal usage sketch
 * follows below.
 */
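
/*
 * A minimal usage sketch, kept out of the build with #if 0 since it is
 * illustrative only: a hypothetical driver embeds a drm_mm managing its VRAM
 * range and allocates one node from it. example_vram_mm and example_alloc
 * are assumptions for this sketch, not part of the DRM API.
 */
#if 0
static struct drm_mm example_vram_mm;

static int example_alloc(struct drm_mm_node *node, u64 size)
{
	/* The node must be cleared to 0 before it is handed to drm_mm. */
	memset(node, 0, sizeof(*node));

	/* Bottom-up first-fit search, no alignment or color constraint. */
	return drm_mm_insert_node_generic(&example_vram_mm, node, size, 0, 0,
					  DRM_MM_SEARCH_DEFAULT,
					  DRM_MM_CREATE_DEFAULT);
}
#endif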

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						u64 size,
						unsigned alignment,
						unsigned long color,
						enum drm_mm_search_flags flags);
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
						u64 size,
						unsigned alignment,
						unsigned long color,
						u64 start,
						u64 end,
						enum drm_mm_search_flags flags);

#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	struct stack_trace trace = {
		.entries = entries,
		.max_entries = STACKDEPTH,
		.skip = 1
	};

	save_stack_trace(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	/* May be called under spinlock, so avoid sleeping */
	node->stack = depot_save_stack(&trace, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	unsigned long entries[STACKDEPTH];
	char *buf;

	buf = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, __drm_mm_nodes(mm), node_list) {
		struct stack_trace trace = {
			.entries = entries,
			.max_entries = STACKDEPTH
		};

		if (!node->stack) {
			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		depot_fetch_stack(node->stack, &trace);
		snprint_stack_trace(buf, BUFSZ, &trace, 0);
		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_first((struct rb_root *)&mm->interval_tree,
					       start, last);
}
EXPORT_SYMBOL(__drm_mm_interval_first);

static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;

	node->__subtree_last = LAST(node);

	if (hole_node->allocated) {
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_node;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start)
			link = &parent->rb.rb_left;
		else
			link = &parent->rb.rb_right;
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented(&node->rb,
			    &mm->interval_tree,
			    &drm_mm_interval_tree_augment);
}

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 u64 size, unsigned alignment,
				 unsigned long color,
				 enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	BUG_ON(adj_start < hole_start);
	BUG_ON(adj_end > hole_end);

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	list_add(&node->node_list, &hole_node->node_list);

	drm_mm_interval_tree_add_node(hole_node, node);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}

	save_stack(node);
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. This is useful
 * to initialize the allocator with preallocated objects which must be set-up
 * before the range allocator can be set-up, e.g. when taking over a firmware
 * framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	u64 end = node->start + node->size;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	if (WARN_ON(node->size == 0))
		return -EINVAL;

	/* Find the relevant hole to add our node to */
	hole = drm_mm_interval_tree_iter_first(&mm->interval_tree,
					       node->start, ~(u64)0);
	if (hole) {
		if (hole->start < end)
			return -ENOSPC;
	} else {
		hole = list_entry(__drm_mm_nodes(mm), typeof(*hole), node_list);
	}

	hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
	if (!hole->hole_follows)
		return -ENOSPC;

	adj_start = hole_start = __drm_mm_hole_node_start(hole);
	adj_end = hole_end = __drm_mm_hole_node_end(hole);

	if (mm->color_adjust)
		mm->color_adjust(hole, node->color, &adj_start, &adj_end);

	if (adj_start > node->start || adj_end < end)
		return -ENOSPC;

	node->mm = mm;
	node->allocated = 1;

	list_add(&node->node_list, &hole->node_list);

	drm_mm_interval_tree_add_node(hole, node);

	if (node->start == hole_start) {
		hole->hole_follows = 0;
		list_del(&hole->hole_stack);
	}

	node->hole_follows = 0;
	if (end != hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}

	save_stack(node);

	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
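
/*
 * A sketch of the firmware take-over described above, kept out of the build
 * with #if 0 since it is illustrative only; fb_base and fb_size are
 * assumptions standing in for values read back from the scanout hardware.
 */
#if 0
static int example_reserve_fw_fb(struct drm_mm *mm, struct drm_mm_node *node,
				 u64 fb_base, u64 fb_size)
{
	memset(node, 0, sizeof(*node));
	/* start, size (and color) must be set up by the caller. */
	node->start = fb_base;
	node->size = fb_size;

	/* Fails with -ENOSPC if the range is already in use. */
	return drm_mm_reserve_node(mm, node);
}
#endif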

/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       u64 size, unsigned alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	if (WARN_ON(size == 0))
		return -EINVAL;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);
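
/*
 * An illustrative sketch (not built): an aligned, top-down allocation, e.g.
 * to keep the bottom of the address space free for small buffers. The 4 KiB
 * alignment is an assumption for the example.
 */
#if 0
static int example_alloc_topdown(struct drm_mm *mm, struct drm_mm_node *node,
				 u64 size)
{
	return drm_mm_insert_node_generic(mm, node, size, 1 << 12, 0,
					  DRM_MM_SEARCH_BELOW,
					  DRM_MM_CREATE_TOP);
}
#endif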

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       u64 size, unsigned alignment,
				       unsigned long color,
				       u64 start, u64 end,
				       enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	list_add(&node->node_list, &hole_node->node_list);

	drm_mm_interval_tree_add_node(hole_node, node);

	BUG_ON(node->start < start);
	BUG_ON(node->start < adj_start);
	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}

	save_stack(node);
}

/**
 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					u64 size, unsigned alignment,
					unsigned long color,
					u64 start, u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	if (WARN_ON(size == 0))
		return -EINVAL;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
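
/*
 * An illustrative sketch (not built): restricting an allocation to the CPU
 * mappable window at the start of VRAM; the 256 MiB window size is an
 * assumption for the example.
 */
#if 0
static int example_alloc_mappable(struct drm_mm *mm, struct drm_mm_node *node,
				  u64 size)
{
	return drm_mm_insert_node_in_range_generic(mm, node, size, 0, 0,
						   0, 256ULL << 20,
						   DRM_MM_SEARCH_DEFAULT,
						   DRM_MM_CREATE_DEFAULT);
}
#endif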

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need
 * to be cleared again before it can be re-inserted into this or any other
 * drm_mm allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	if (WARN_ON(!node->allocated))
		return;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(__drm_mm_hole_node_start(node) ==
		       __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(__drm_mm_hole_node_start(node) !=
		       __drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	drm_mm_interval_tree_remove(node, &mm->interval_tree);
	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		u64 tmp = start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem)
			start += alignment - rem;
	}

	return end >= start + size;
}
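
/*
 * Worked example for the alignment fix-up above (values illustrative only):
 * with start = 0x1003 and alignment = 0x1000, do_div() leaves rem = 3, so
 * start is bumped by 0x1000 - 3 to the next aligned address 0x2000. The hole
 * only qualifies if [0x2000, 0x2000 + size) still ends at or before @end.
 */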

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						      u64 size,
						      unsigned alignment,
						      unsigned long color,
						      enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							u64 size,
							unsigned alignment,
							unsigned long color,
							u64 start,
							u64 end,
							enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (adj_start < start)
			adj_start = start;
		if (adj_end > end)
			adj_end = end;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful when drivers embed the drm_mm_node structure and hence can't
 * move allocations by reassigning pointers. It's a combination of remove and
 * insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;
	new->__subtree_last = old->__subtree_last;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);
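
/*
 * An illustrative sketch (not built): handing an allocation over between two
 * embedding objects; struct example_bo is an assumption for the example.
 */
#if 0
struct example_bo {
	struct drm_mm_node vram_node;
	/* ... driver state ... */
};

static void example_handover(struct example_bo *old_bo,
			     struct example_bo *new_bo)
{
	/* new_bo inherits old_bo's range; old_bo's node becomes unallocated. */
	drm_mm_replace_node(&old_bo->vram_node, &new_bo->vram_node);
}
#endif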

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have continuous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not very
 * efficient to simply select objects from the tail of an LRU until there's a
 * suitable hole: Especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
 * objects to the roster (probably by walking an LRU list, but this can be
 * freely implemented) until a suitable hole is found or there's no further
 * evictable object.
 *
 * Then the driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected in the scan. Adding and
 * removing an object is O(1), and since freeing a node is also O(1) the
 * overall complexity is O(scanned_objects). So like the free stack which needs
 * to be walked before a scan operation even begins this is linear in the
 * number of objects. It doesn't seem to hurt badly. A sketch of a full
 * eviction loop follows below.
 */
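
/*
 * A sketch of the eviction loop described above, kept out of the build with
 * #if 0 since it is illustrative only: struct example_scan_bo, its LRU list
 * and example_evict() are assumptions standing in for a driver's real
 * bookkeeping, not part of the drm_mm API.
 */
#if 0
struct example_scan_bo {
	struct drm_mm_node node;
	struct list_head lru_link;	/* on the driver's eviction LRU */
	struct list_head scan_link;	/* temporary, for the scan roster */
};

/* Driver-provided: unbinds @bo and ends up calling drm_mm_remove_node(). */
static void example_evict(struct example_scan_bo *bo);

static int example_evict_for(struct drm_mm *mm, struct list_head *lru,
			     u64 size, unsigned alignment)
{
	struct example_scan_bo *bo, *next;
	LIST_HEAD(scan_list);
	bool found = false;

	drm_mm_init_scan(mm, size, alignment, 0);

	/* Step 1: feed LRU objects into the scan until a hole is found. */
	list_for_each_entry(bo, lru, lru_link) {
		/* list_add() prepends, so scan_list is in reverse add order. */
		list_add(&bo->scan_link, &scan_list);
		if (drm_mm_scan_add_block(&bo->node)) {
			found = true;
			break;
		}
	}

	/*
	 * Step 2: every block must be taken back out, in exactly the reverse
	 * order, to restore the allocator state. Keep only the blocks the
	 * scan selected for eviction on the local list.
	 */
	list_for_each_entry_safe(bo, next, &scan_list, scan_link) {
		if (!drm_mm_scan_remove_block(&bo->node))
			list_del(&bo->scan_link);
	}

	if (!found)
		return -ENOSPC;

	/* Step 3: with the scan finished, evicting is safe again. */
	list_for_each_entry_safe(bo, next, &scan_list, scan_link) {
		list_del(&bo->scan_link);
		example_evict(bo);
	}

	return 0;
}
#endif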

/**
 * drm_mm_init_scan - initialize lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      u64 size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 u64 size,
				 unsigned alignment,
				 unsigned long color,
				 u64 start,
				 u64 end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return true;
	}

	return false;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @node: drm_mm_node to remove
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 * return the just freed block (because it's at the top of the free_stack
 * list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
bool drm_mm_clean(const struct drm_mm *mm)
{
	const struct list_head *head = __drm_mm_nodes(mm);

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	mm->scanned_blocks = 0;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	mm->head_node.allocated = 0;
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->interval_tree = RB_ROOT;

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	if (WARN(!list_empty(__drm_mm_nodes(mm)),
		 "Memory manager not clean during takedown.\n"))
		show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);
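
/*
 * An illustrative sketch (not built) of the expected init/takedown pairing;
 * the 256 MiB managed range is an assumption for the example.
 */
#if 0
static void example_lifetime(struct drm_mm *mm)
{
	memset(mm, 0, sizeof(*mm));	/* @mm must be cleared to 0 first */
	drm_mm_init(mm, 0, 256ULL << 20);

	/* ... insert, use and remove nodes ... */

	/* Calling this while nodes are still allocated is a bug. */
	drm_mm_takedown(mm);
}
#endif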

static u64 drm_mm_debug_hole(const struct drm_mm_node *entry,
			     const char *prefix)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
			 hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_debug_table - dump allocator state to dmesg
 * @mm: drm_mm allocator to dump
 * @prefix: prefix to use for dumping to dmesg
 */
void drm_mm_debug_table(const struct drm_mm *mm, const char *prefix)
{
	const struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_debug_hole(&mm->head_node, prefix);

	drm_mm_for_each_node(entry, mm) {
		pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
			 entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_debug_hole(entry, prefix);
	}
	total = total_free + total_used;

	pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
		 total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static u64 drm_mm_dump_hole(struct seq_file *m, const struct drm_mm_node *entry)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		seq_printf(m, "%#018llx-%#018llx: %llu: free\n", hole_start,
			   hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_dump_table - dump allocator state to a seq_file
 * @m: seq_file to dump to
 * @mm: drm_mm allocator to dump
 */
int drm_mm_dump_table(struct seq_file *m, const struct drm_mm *mm)
{
	const struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(m, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(m, entry);
	}
	total = total_free + total_used;

	seq_printf(m, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif