/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved if
 * an RB-tree were used instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include <linux/interval_tree_generic.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. The drivers are free to use the
 * resource allocator from the linux core if it suits them, the upside of
 * drm_mm is that it's in the DRM core, which means that it's easier to extend
 * for some of the crazier special-purpose needs of GPUs.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * data structures. drm_mm itself will not do any allocations of its own, so
 * if drivers choose not to embed nodes they still need to allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This
 * is useful for taking over initial mode setting configurations from the
 * firmware, where an object needs to be created which exactly matches the
 * firmware's scanout target. As long as the range is still free it can be
 * inserted anytime after the allocator is initialized, which helps with
 * avoiding looped dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic data structures seems to be a fairly decent approach to
 * clustering allocations and avoiding too much fragmentation. This means free
 * space searches are O(num_holes). Given all the fancy features drm_mm
 * supports, something better would be fairly complex, and since graphics
 * thrashing is a fairly steep cliff anyway it is not a real concern. Removing
 * a node again is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just
 * an opaque unsigned long) which in conjunction with a driver callback can be
 * used to implement sophisticated placement restrictions. The i915 DRM driver
 * uses this to implement guard pages between incompatible caching domains in
 * the graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging.
 */
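
/*
 * A minimal usage sketch (illustrative only: "struct my_gpu" and the sizes
 * below are hypothetical, not part of this file):
 *
 *	struct my_gpu {
 *		struct drm_mm mm;
 *	};
 *
 *	void my_gpu_init(struct my_gpu *gpu)
 *	{
 *		// Manage a 256 MiB range starting at offset 0.
 *		drm_mm_init(&gpu->mm, 0, 256ULL << 20);
 *	}
 *
 *	int my_gpu_alloc(struct my_gpu *gpu, struct drm_mm_node *node, u64 size)
 *	{
 *		// The node must be zeroed by the caller before insertion.
 *		return drm_mm_insert_node_generic(&gpu->mm, node, size, 0, 0,
 *						  DRM_MM_SEARCH_DEFAULT,
 *						  DRM_MM_CREATE_DEFAULT);
 *	}
 *
 *	void my_gpu_free(struct drm_mm_node *node)
 *	{
 *		// No need to clear the node before re-inserting it elsewhere.
 *		drm_mm_remove_node(node);
 *	}
 */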

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						u64 size,
						unsigned alignment,
						unsigned long color,
						enum drm_mm_search_flags flags);
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
						u64 size,
						unsigned alignment,
						unsigned long color,
						u64 start,
						u64 end,
						enum drm_mm_search_flags flags);

#ifdef CONFIG_DRM_DEBUG_MM
#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	struct stack_trace trace = {
		.entries = entries,
		.max_entries = STACKDEPTH,
		.skip = 1
	};

	save_stack_trace(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	/* May be called under spinlock, so avoid sleeping */
	node->stack = depot_save_stack(&trace, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	unsigned long entries[STACKDEPTH];
	char *buf;

	buf = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, &mm->head_node.node_list, node_list) {
		struct stack_trace trace = {
			.entries = entries,
			.max_entries = STACKDEPTH
		};

		if (!node->stack) {
			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		depot_fetch_stack(node->stack, &trace);
		snprint_stack_trace(buf, BUFSZ, &trace, 0);
		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)

struct drm_mm_node *
drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_first(&mm->interval_tree,
					       start, last);
}
EXPORT_SYMBOL(drm_mm_interval_first);

struct drm_mm_node *
drm_mm_interval_next(struct drm_mm_node *node, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_next(node, start, last);
}
EXPORT_SYMBOL(drm_mm_interval_next);
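
/*
 * Sketch of walking all nodes overlapping a range (the [0x1000, 0x1fff]
 * interval is a hypothetical example; "mm" is assumed to be an initialized
 * drm_mm):
 *
 *	struct drm_mm_node *node;
 *
 *	for (node = drm_mm_interval_first(mm, 0x1000, 0x1fff);
 *	     node;
 *	     node = drm_mm_interval_next(node, 0x1000, 0x1fff))
 *		pr_info("overlap [%llx + %llx]\n", node->start, node->size);
 */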

static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;

	node->__subtree_last = LAST(node);

	if (hole_node->allocated) {
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_node;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start)
			link = &parent->rb.rb_left;
		else
			link = &parent->rb.rb_right;
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented(&node->rb,
			    &mm->interval_tree,
			    &drm_mm_interval_tree_augment);
}

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 u64 size, unsigned alignment,
				 unsigned long color,
				 enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	BUG_ON(adj_start < hole_start);
	BUG_ON(adj_end > hole_end);

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	list_add(&node->node_list, &hole_node->node_list);

	drm_mm_interval_tree_add_node(hole_node, node);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}

	save_stack(node);
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. This is useful
 * to initialize the allocator with preallocated objects which must be set-up
 * before the range allocator can be set-up, e.g. when taking over a firmware
 * framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	u64 end = node->start + node->size;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;

	if (WARN_ON(node->size == 0))
		return -EINVAL;

	/* Find the relevant hole to add our node to */
	hole = drm_mm_interval_tree_iter_first(&mm->interval_tree,
					       node->start, ~(u64)0);
	if (hole) {
		if (hole->start < end)
			return -ENOSPC;
	} else {
		hole = list_entry(&mm->head_node.node_list,
				  typeof(*hole), node_list);
	}

	hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
	if (!hole->hole_follows)
		return -ENOSPC;

	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);
	if (hole_start > node->start || hole_end < end)
		return -ENOSPC;

	node->mm = mm;
	node->allocated = 1;

	list_add(&node->node_list, &hole->node_list);

	drm_mm_interval_tree_add_node(hole, node);

	if (node->start == hole_start) {
		hole->hole_follows = 0;
		list_del(&hole->hole_stack);
	}

	node->hole_follows = 0;
	if (end != hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}

	save_stack(node);

	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
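
/*
 * Reservation sketch for taking over a firmware framebuffer (illustrative
 * only: "fb_base", "fb_size" and "gpu->mm" are hypothetical values and
 * objects, not part of this file):
 *
 *	struct drm_mm_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
 *	int ret;
 *
 *	if (!node)
 *		return -ENOMEM;
 *	node->start = fb_base;
 *	node->size = fb_size;
 *	ret = drm_mm_reserve_node(&gpu->mm, node);
 *	// -ENOSPC means something else already occupies that range.
 */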

/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       u64 size, unsigned alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	if (WARN_ON(size == 0))
		return -EINVAL;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);
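
/*
 * Call sketch (illustrative only; "mm" and "node" are assumed to be an
 * initialized allocator and a zeroed drm_mm_node):
 *
 *	// 4 KiB-aligned 64 KiB allocation, default bottom-up first-fit search.
 *	int ret = drm_mm_insert_node_generic(mm, node, 64 << 10, 4096, 0,
 *					     DRM_MM_SEARCH_DEFAULT,
 *					     DRM_MM_CREATE_DEFAULT);
 */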

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       u64 size, unsigned alignment,
				       unsigned long color,
				       u64 start, u64 end,
				       enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	list_add(&node->node_list, &hole_node->node_list);

	drm_mm_interval_tree_add_node(hole_node, node);

	BUG_ON(node->start < start);
	BUG_ON(node->start < adj_start);
	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}

	save_stack(node);
}

/**
 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					u64 size, unsigned alignment,
					unsigned long color,
					u64 start, u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	if (WARN_ON(size == 0))
		return -EINVAL;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
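
/*
 * Range-restricted call sketch (illustrative only; the 256 MiB window is a
 * hypothetical example of a CPU-mappable aperture):
 *
 *	// Place 1 MiB inside the first 256 MiB, allocated top-down so the
 *	// low addresses stay free for allocations that must sit there.
 *	int ret = drm_mm_insert_node_in_range_generic(mm, node, 1 << 20, 0, 0,
 *						      0, 256 << 20,
 *						      DRM_MM_SEARCH_BELOW,
 *						      DRM_MM_CREATE_TOP);
 */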

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need to
 * be cleared again before it can be re-inserted into this or any other drm_mm
 * allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	if (WARN_ON(!node->allocated))
		return;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(__drm_mm_hole_node_start(node) ==
		       __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(__drm_mm_hole_node_start(node) !=
		       __drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	drm_mm_interval_tree_remove(node, &mm->interval_tree);
	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		u64 tmp = start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem)
			start += alignment - rem;
	}

	return end >= start + size;
}
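
/*
 * Worked example of the alignment fixup above (values are illustrative):
 * with start = 0x1003 and alignment = 0x1000, do_div() leaves rem = 3, so
 * start is rounded up by 0x1000 - 3 = 0xffd to the aligned offset 0x2000
 * before the final size check against end.
 */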

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						      u64 size,
						      unsigned alignment,
						      unsigned long color,
						      enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							u64 size,
							unsigned alignment,
							unsigned long color,
							u64 start,
							u64 end,
							enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (adj_start < start)
			adj_start = start;
		if (adj_end > end)
			adj_end = end;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful for when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of remove
 * and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;
	new->__subtree_last = old->__subtree_last;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);
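
/*
 * Sketch (illustrative only: "old_obj" and "new_obj" are hypothetical driver
 * objects embedding a drm_mm_node named "vma_node"):
 *
 *	// new_obj inherits old_obj's range; no search happens and the start
 *	// offset is guaranteed to stay the same.
 *	drm_mm_replace_node(&old_obj->vma_node, &new_obj->vma_node);
 */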

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have continuous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not very
 * efficient to simply select objects from the tail of an LRU until there's a
 * suitable hole: Especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
 * objects to the roster (probably by walking an LRU list, but this can be
 * freely implemented) until a suitable hole is found or there's no further
 * evictable object.
 *
 * Then the driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected in the scan. Adding and
 * removing an object is O(1), and since freeing a node is also O(1) the overall
 * complexity is O(scanned_objects). So like the free stack which needs to be
 * walked before a scan operation even begins this is linear in the number of
 * objects. It doesn't seem to hurt badly.
 */
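
/*
 * Eviction loop sketch (illustrative only: "struct my_obj", its "lru_link"
 * and "scan_link" list heads and my_evict() are hypothetical driver
 * constructs, not part of this file):
 *
 *	struct my_obj *obj, *tmp;
 *	LIST_HEAD(scan_list);
 *	bool found = false;
 *
 *	drm_mm_init_scan(mm, size, alignment, 0);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		// list_add() prepends, so walking scan_list later visits
 *		// the objects in reverse order of addition, as required.
 *		list_add(&obj->scan_link, &scan_list);
 *		if (drm_mm_scan_add_block(&obj->node)) {
 *			found = true;
 *			break;
 *		}
 *	}
 *
 *	// Every scanned block must be removed again, in reverse order.
 *	// Blocks overlapping the found hole are kept for eviction; if no
 *	// hole was found, nothing is flagged and scan_list drains empty.
 *	list_for_each_entry_safe(obj, tmp, &scan_list, scan_link)
 *		if (!drm_mm_scan_remove_block(&obj->node))
 *			list_del(&obj->scan_link);
 *
 *	if (!found)
 *		return -ENOSPC;
 *
 *	// Evict the remaining objects (freeing their nodes), then retry
 *	// the real allocation.
 *	list_for_each_entry_safe(obj, tmp, &scan_list, scan_link)
 *		my_evict(obj);
 */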

/**
 * drm_mm_init_scan - initialize lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. Note that there's no need to specify allocation flags, since they only
 * change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      u64 size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. Note that there's no need to specify allocation flags, since they only
 * change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 u64 size,
				 unsigned alignment,
				 unsigned long color,
				 u64 start,
				 u64 end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return true;
	}

	return false;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @node: drm_mm_node to remove
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 * return the just freed block (because it's at the top of the free_stack list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
bool drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	mm->scanned_blocks = 0;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->interval_tree = RB_ROOT;

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	if (WARN(!list_empty(&mm->head_node.node_list),
		 "Memory manager not clean during takedown.\n"))
		show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
			     const char *prefix)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
			 hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_debug_table - dump allocator state to dmesg
 * @mm: drm_mm allocator to dump
 * @prefix: prefix to use for dumping to dmesg
 */
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_debug_hole(&mm->head_node, prefix);

	drm_mm_for_each_node(entry, mm) {
		pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
			 entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_debug_hole(entry, prefix);
	}
	total = total_free + total_used;

	pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
		 total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		seq_printf(m, "%#018llx-%#018llx: %llu: free\n", hole_start,
			   hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_dump_table - dump allocator state to a seq_file
 * @m: seq_file to dump to
 * @mm: drm_mm allocator to dump
 */
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(m, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(m, entry);
	}
	total = total_free + total_used;

	seq_printf(m, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif