i915_active.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/debugobjects.h>

#include "gt/intel_context.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_active.h"
#include "i915_globals.h"

/*
 * Active refs memory management
 *
 * To be more economical with memory, we reap all the i915_active trees as
 * they idle (when we know the active requests are inactive) and allocate the
 * nodes from a local slab cache to hopefully reduce the fragmentation.
 */
static struct i915_global_active {
	struct i915_global base;
	struct kmem_cache *slab_cache;
} global;

struct active_node {
	struct i915_active_fence base;
	struct i915_active *ref;
	struct rb_node node;
	u64 timeline;
};

static inline struct active_node *
node_from_active(struct i915_active_fence *active)
{
	return container_of(active, struct active_node, base);
}

#define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)

static inline bool is_barrier(const struct i915_active_fence *active)
{
	return IS_ERR(rcu_access_pointer(active->fence));
}

static inline struct llist_node *barrier_to_ll(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return (struct llist_node *)&node->base.cb.node;
}

static inline struct intel_engine_cs *
__barrier_to_engine(struct active_node *node)
{
	return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
}

static inline struct intel_engine_cs *
barrier_to_engine(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return __barrier_to_engine(node);
}

static inline struct active_node *barrier_from_ll(struct llist_node *x)
{
	return container_of((struct list_head *)x,
			    struct active_node, base.cb.node);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)

static void *active_debug_hint(void *addr)
{
	struct i915_active *ref = addr;

	return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
}

static struct debug_obj_descr active_debug_desc = {
	.name = "i915_active",
	.debug_hint = active_debug_hint,
};

static void debug_active_init(struct i915_active *ref)
{
	debug_object_init(ref, &active_debug_desc);
}

static void debug_active_activate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count)) /* before the first inc */
		debug_object_activate(ref, &active_debug_desc);
}

static void debug_active_deactivate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count)) /* after the last dec */
		debug_object_deactivate(ref, &active_debug_desc);
}

static void debug_active_fini(struct i915_active *ref)
{
	debug_object_free(ref, &active_debug_desc);
}

static void debug_active_assert(struct i915_active *ref)
{
	debug_object_assert_init(ref, &active_debug_desc);
}

#else

static inline void debug_active_init(struct i915_active *ref) { }
static inline void debug_active_activate(struct i915_active *ref) { }
static inline void debug_active_deactivate(struct i915_active *ref) { }
static inline void debug_active_fini(struct i915_active *ref) { }
static inline void debug_active_assert(struct i915_active *ref) { }

#endif

static void
__active_retire(struct i915_active *ref)
{
	struct active_node *it, *n;
	struct rb_root root;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* return the unused nodes to our slabcache -- flushing the allocator */
	if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
		return;

	GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
	debug_active_deactivate(ref);

	root = ref->tree;
	ref->tree = RB_ROOT;
	ref->cache = NULL;

	spin_unlock_irqrestore(&ref->tree_lock, flags);

	/* After the final retire, the entire struct may be freed */
	if (ref->retire)
		ref->retire(ref);

	/* ... except if you wait on it, you must manage your own references! */
	wake_up_var(ref);

	rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
		GEM_BUG_ON(i915_active_fence_isset(&it->base));
		kmem_cache_free(global.slab_cache, it);
	}
}

static void
active_work(struct work_struct *wrk)
{
	struct i915_active *ref = container_of(wrk, typeof(*ref), work);

	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	__active_retire(ref);
}

static void
active_retire(struct i915_active *ref)
{
	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
		queue_work(system_unbound_wq, &ref->work);
		return;
	}

	__active_retire(ref);
}

static inline struct dma_fence **
__active_fence_slot(struct i915_active_fence *active)
{
	return (struct dma_fence ** __force)&active->fence;
}

static inline bool
active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct i915_active_fence *active =
		container_of(cb, typeof(*active), cb);

	return cmpxchg(__active_fence_slot(active), fence, NULL) == fence;
}

static void
node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct active_node, base.cb)->ref);
}

static void
excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct i915_active, excl.cb));
}

static struct i915_active_fence *
active_instance(struct i915_active *ref, struct intel_timeline *tl)
{
	struct active_node *node, *prealloc;
	struct rb_node **p, *parent;
	u64 idx = tl->fence_context;

	/*
	 * We track the most recently used timeline to skip an rbtree search
	 * for the common case; under typical loads we never need the rbtree
	 * at all. We can reuse the last slot if it is empty, that is
	 * after the previous activity has been retired, or if it matches the
	 * current timeline.
	 */
	node = READ_ONCE(ref->cache);
	if (node && node->timeline == idx)
		return &node->base;

	/* Preallocate a replacement, just in case */
	prealloc = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
	if (!prealloc)
		return NULL;

	spin_lock_irq(&ref->tree_lock);
	GEM_BUG_ON(i915_active_is_idle(ref));

	parent = NULL;
	p = &ref->tree.rb_node;
	while (*p) {
		parent = *p;

		node = rb_entry(parent, struct active_node, node);
		if (node->timeline == idx) {
			kmem_cache_free(global.slab_cache, prealloc);
			goto out;
		}

		if (node->timeline < idx)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	node = prealloc;
	__i915_active_fence_init(&node->base, NULL, node_retire);
	node->ref = ref;
	node->timeline = idx;

	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, &ref->tree);

out:
	ref->cache = node;
	spin_unlock_irq(&ref->tree_lock);

	BUILD_BUG_ON(offsetof(typeof(*node), base));
	return &node->base;
}

void __i915_active_init(struct i915_active *ref,
			int (*active)(struct i915_active *ref),
			void (*retire)(struct i915_active *ref),
			struct lock_class_key *mkey,
			struct lock_class_key *wkey)
{
	unsigned long bits;

	debug_active_init(ref);

	ref->flags = 0;
	ref->active = active;
	ref->retire = ptr_unpack_bits(retire, &bits, 2);
	if (bits & I915_ACTIVE_MAY_SLEEP)
		ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;

	spin_lock_init(&ref->tree_lock);
	ref->tree = RB_ROOT;
	ref->cache = NULL;

	init_llist_head(&ref->preallocated_barriers);
	atomic_set(&ref->count, 0);
	__mutex_init(&ref->mutex, "i915_active", mkey);
	__i915_active_fence_init(&ref->excl, NULL, excl_retire);
	INIT_WORK(&ref->work, active_work);
#if IS_ENABLED(CONFIG_LOCKDEP)
	lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0);
#endif
}
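
/*
 * Illustrative usage (a hypothetical owner; the foo names are not from this
 * file): an object embeds a struct i915_active and initialises it with its
 * own lock classes. In-tree callers normally go through the
 * i915_active_init() wrapper in i915_active.h, which supplies the
 * lock_class_keys automatically.
 *
 *	struct foo {
 *		struct i915_active active;
 *	};
 *
 *	static int foo_active(struct i915_active *ref) { return 0; }
 *	static void foo_retire(struct i915_active *ref) { }
 *
 *	static void foo_init(struct foo *foo)
 *	{
 *		static struct lock_class_key mkey, wkey;
 *
 *		__i915_active_init(&foo->active, foo_active, foo_retire,
 *				   &mkey, &wkey);
 *	}
 */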

static bool ____active_del_barrier(struct i915_active *ref,
				   struct active_node *node,
				   struct intel_engine_cs *engine)

{
	struct llist_node *head = NULL, *tail = NULL;
	struct llist_node *pos, *next;

	GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);

	/*
	 * Rebuild the llist excluding our node. We may perform this
	 * outside of the kernel_context timeline mutex and so someone
	 * else may be manipulating the engine->barrier_tasks, in
	 * which case either we or they will be upset :)
	 *
	 * A second __active_del_barrier() will report failure to claim
	 * the active_node and the caller will just shrug and know not to
	 * claim ownership of its node.
	 *
	 * A concurrent i915_request_add_active_barriers() will miss adding
	 * any of the tasks, but we will try again on the next -- and since
	 * we are actively using the barrier, we know that there will be
	 * at least another opportunity when we idle.
	 */
	llist_for_each_safe(pos, next, llist_del_all(&engine->barrier_tasks)) {
		if (node == barrier_from_ll(pos)) {
			node = NULL;
			continue;
		}

		pos->next = head;
		head = pos;
		if (!tail)
			tail = pos;
	}
	if (head)
		llist_add_batch(head, tail, &engine->barrier_tasks);

	return !node;
}

static bool
__active_del_barrier(struct i915_active *ref, struct active_node *node)
{
	return ____active_del_barrier(ref, node, barrier_to_engine(node));
}

int i915_active_ref(struct i915_active *ref,
		    struct intel_timeline *tl,
		    struct dma_fence *fence)
{
	struct i915_active_fence *active;
	int err;

	lockdep_assert_held(&tl->mutex);

	/* Prevent reaping in case we malloc/wait while building the tree */
	err = i915_active_acquire(ref);
	if (err)
		return err;

	active = active_instance(ref, tl);
	if (!active) {
		err = -ENOMEM;
		goto out;
	}

	if (is_barrier(active)) { /* proto-node used by our idle barrier */
		/*
		 * This request is on the kernel_context timeline, and so
		 * we can use it to substitute for the pending idle-barrier
		 * request that we want to emit on the kernel_context.
		 */
		__active_del_barrier(ref, node_from_active(active));
		RCU_INIT_POINTER(active->fence, NULL);
		atomic_dec(&ref->count);
	}
	if (!__i915_active_fence_set(active, fence))
		atomic_inc(&ref->count);

out:
	i915_active_release(ref);
	return err;
}
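
/*
 * Sketch of a typical call site (vma, tl and rq are placeholders, not taken
 * from this file): while constructing a request under the timeline mutex,
 * record its fence in the tracker so the i915_active stays busy until that
 * request is retired.
 *
 *	lockdep_assert_held(&tl->mutex);
 *	err = i915_active_ref(&vma->active, tl, &rq->fence);
 *	if (err)
 *		return err;
 */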

struct dma_fence *
i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
{
	struct dma_fence *prev;

	/* We expect the caller to manage the exclusive timeline ordering */
	GEM_BUG_ON(i915_active_is_idle(ref));

	rcu_read_lock();
	prev = __i915_active_fence_set(&ref->excl, f);
	if (prev)
		prev = dma_fence_get_rcu(prev);
	else
		atomic_inc(&ref->count);
	rcu_read_unlock();

	return prev;
}

bool i915_active_acquire_if_busy(struct i915_active *ref)
{
	debug_active_assert(ref);
	return atomic_add_unless(&ref->count, 1, 0);
}

int i915_active_acquire(struct i915_active *ref)
{
	int err;

	if (i915_active_acquire_if_busy(ref))
		return 0;

	err = mutex_lock_interruptible(&ref->mutex);
	if (err)
		return err;

	if (likely(!i915_active_acquire_if_busy(ref))) {
		if (ref->active)
			err = ref->active(ref);
		if (!err) {
			spin_lock_irq(&ref->tree_lock); /* __active_retire() */
			debug_active_activate(ref);
			atomic_inc(&ref->count);
			spin_unlock_irq(&ref->tree_lock);
		}
	}

	mutex_unlock(&ref->mutex);

	return err;
}

void i915_active_release(struct i915_active *ref)
{
	debug_active_assert(ref);
	active_retire(ref);
}

static void enable_signaling(struct i915_active_fence *active)
{
	struct dma_fence *fence;

	if (unlikely(is_barrier(active)))
		return;

	fence = i915_active_fence_get(active);
	if (!fence)
		return;

	dma_fence_enable_sw_signaling(fence);
	dma_fence_put(fence);
}

static int flush_barrier(struct active_node *it)
{
	struct intel_engine_cs *engine;

	if (likely(!is_barrier(&it->base)))
		return 0;

	engine = __barrier_to_engine(it);
	smp_rmb(); /* serialise with add_active_barriers */
	if (!is_barrier(&it->base))
		return 0;

	return intel_engine_flush_barriers(engine);
}

static int flush_lazy_signals(struct i915_active *ref)
{
	struct active_node *it, *n;
	int err = 0;

	enable_signaling(&ref->excl);
	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		err = flush_barrier(it); /* unconnected idle barrier? */
		if (err)
			break;

		enable_signaling(&it->base);
	}

	return err;
}

int i915_active_wait(struct i915_active *ref)
{
	int err;

	might_sleep();

	if (!i915_active_acquire_if_busy(ref))
		return 0;

	/* Any fence added after the wait begins will not be auto-signaled */
	err = flush_lazy_signals(ref);
	i915_active_release(ref);
	if (err)
		return err;

	if (wait_var_event_interruptible(ref, i915_active_is_idle(ref)))
		return -EINTR;

	flush_work(&ref->work);
	return 0;
}

static int __await_active(struct i915_active_fence *active,
			  int (*fn)(void *arg, struct dma_fence *fence),
			  void *arg)
{
	struct dma_fence *fence;

	if (is_barrier(active)) /* XXX flush the barrier? */
		return 0;

	fence = i915_active_fence_get(active);
	if (fence) {
		int err;

		err = fn(arg, fence);
		dma_fence_put(fence);
		if (err < 0)
			return err;
	}

	return 0;
}

static int await_active(struct i915_active *ref,
			unsigned int flags,
			int (*fn)(void *arg, struct dma_fence *fence),
			void *arg)
{
	int err = 0;

	/* We must always wait for the exclusive fence! */
	if (rcu_access_pointer(ref->excl.fence)) {
		err = __await_active(&ref->excl, fn, arg);
		if (err)
			return err;
	}

	if (flags & I915_ACTIVE_AWAIT_ALL && i915_active_acquire_if_busy(ref)) {
		struct active_node *it, *n;

		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
			err = __await_active(&it->base, fn, arg);
			if (err)
				break;
		}
		i915_active_release(ref);
		if (err)
			return err;
	}

	return 0;
}

static int rq_await_fence(void *arg, struct dma_fence *fence)
{
	return i915_request_await_dma_fence(arg, fence);
}

int i915_request_await_active(struct i915_request *rq,
			      struct i915_active *ref,
			      unsigned int flags)
{
	return await_active(ref, flags, rq_await_fence, rq);
}

static int sw_await_fence(void *arg, struct dma_fence *fence)
{
	return i915_sw_fence_await_dma_fence(arg, fence, 0,
					     GFP_NOWAIT | __GFP_NOWARN);
}

int i915_sw_fence_await_active(struct i915_sw_fence *fence,
			       struct i915_active *ref,
			       unsigned int flags)
{
	return await_active(ref, flags, sw_await_fence, fence);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
void i915_active_fini(struct i915_active *ref)
{
	debug_active_fini(ref);
	GEM_BUG_ON(atomic_read(&ref->count));
	GEM_BUG_ON(work_pending(&ref->work));
	GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
	mutex_destroy(&ref->mutex);
}
#endif

static inline bool is_idle_barrier(struct active_node *node, u64 idx)
{
	return node->timeline == idx && !i915_active_fence_isset(&node->base);
}

static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
{
	struct rb_node *prev, *p;

	if (RB_EMPTY_ROOT(&ref->tree))
		return NULL;

	spin_lock_irq(&ref->tree_lock);
	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Try to reuse any existing barrier nodes already allocated for this
	 * i915_active; due to overlapping active phases there is likely a
	 * node kept alive (as we reuse before parking). We prefer to reuse
	 * completely idle barriers (less hassle in manipulating the llists),
	 * but otherwise any will do.
	 */
	if (ref->cache && is_idle_barrier(ref->cache, idx)) {
		p = &ref->cache->node;
		goto match;
	}

	prev = NULL;
	p = ref->tree.rb_node;
	while (p) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);

		if (is_idle_barrier(node, idx))
			goto match;

		prev = p;
		if (node->timeline < idx)
			p = p->rb_right;
		else
			p = p->rb_left;
	}

	/*
	 * No quick match, but we did find the leftmost rb_node for the
	 * kernel_context. Walk the rb_tree in-order to see if there were
	 * any idle-barriers on this timeline that we missed, or just use
	 * the first pending barrier.
	 */
	for (p = prev; p; p = rb_next(p)) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);
		struct intel_engine_cs *engine;

		if (node->timeline > idx)
			break;

		if (node->timeline < idx)
			continue;

		if (is_idle_barrier(node, idx))
			goto match;

		/*
		 * The list of pending barriers is protected by the
		 * kernel_context timeline, which notably we do not hold
		 * here. i915_request_add_active_barriers() may consume
		 * the barrier before we claim it, so we have to check
		 * for success.
		 */
		engine = __barrier_to_engine(node);
		smp_rmb(); /* serialise with add_active_barriers */
		if (is_barrier(&node->base) &&
		    ____active_del_barrier(ref, node, engine))
			goto match;
	}

	spin_unlock_irq(&ref->tree_lock);

	return NULL;

match:
	rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
	if (p == &ref->cache->node)
		ref->cache = NULL;
	spin_unlock_irq(&ref->tree_lock);

	return rb_entry(p, struct active_node, node);
}
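
/*
 * Idle-barrier lifecycle, as a rough sketch (error handling elided; ref,
 * engine and rq stand in for whatever the caller is parking):
 *
 *	err = i915_active_acquire_preallocate_barrier(ref, engine);
 *	...
 *	i915_active_acquire_barrier(ref);
 *	...
 *	i915_request_add_active_barriers(rq);
 *
 * Preallocation reserves a proto-node per physical engine, acquire_barrier()
 * publishes those nodes into the rbtree and onto engine->barrier_tasks, and
 * add_active_barriers() couples them to a kernel_context request so that the
 * reference is finally dropped when that request's fence signals.
 */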

int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
					    struct intel_engine_cs *engine)
{
	intel_engine_mask_t tmp, mask = engine->mask;
	struct llist_node *first = NULL, *last = NULL;
	struct intel_gt *gt = engine->gt;
	int err;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* Wait until the previous preallocation is completed */
	while (!llist_empty(&ref->preallocated_barriers))
		cond_resched();

	/*
	 * Preallocate a node for each physical engine supporting the target
	 * engine (remember virtual engines have more than one sibling).
	 * We can then use the preallocated nodes in
	 * i915_active_acquire_barrier()
	 */
	GEM_BUG_ON(!mask);
	for_each_engine_masked(engine, gt, mask, tmp) {
		u64 idx = engine->kernel_context->timeline->fence_context;
		struct llist_node *prev = first;
		struct active_node *node;

		node = reuse_idle_barrier(ref, idx);
		if (!node) {
			node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
			if (!node) {
				err = -ENOMEM;
				goto unwind;
			}

			RCU_INIT_POINTER(node->base.fence, NULL);
			node->base.cb.func = node_retire;
			node->timeline = idx;
			node->ref = ref;
		}

		if (!i915_active_fence_isset(&node->base)) {
			/*
			 * Mark this as being *our* unconnected proto-node.
			 *
			 * Since this node is not in any list, and we have
			 * decoupled it from the rbtree, we can reuse the
			 * request to indicate this is an idle-barrier node
			 * and then we can use the rb_node and list pointers
			 * for our tracking of the pending barrier.
			 */
			RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
			node->base.cb.node.prev = (void *)engine;
			atomic_inc(&ref->count);
		}
		GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));

		GEM_BUG_ON(barrier_to_engine(node) != engine);
		first = barrier_to_ll(node);
		first->next = prev;
		if (!last)
			last = first;
		intel_engine_pm_get(engine);
	}

	GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
	llist_add_batch(first, last, &ref->preallocated_barriers);

	return 0;

unwind:
	while (first) {
		struct active_node *node = barrier_from_ll(first);

		first = first->next;

		atomic_dec(&ref->count);
		intel_engine_pm_put(barrier_to_engine(node));

		kmem_cache_free(global.slab_cache, node);
	}
	return err;
}

void i915_active_acquire_barrier(struct i915_active *ref)
{
	struct llist_node *pos, *next;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Transfer the list of preallocated barriers into the
	 * i915_active rbtree, but only as proto-nodes. They will be
	 * populated by i915_request_add_active_barriers() to point to the
	 * request that will eventually release them.
	 */
	llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
		struct active_node *node = barrier_from_ll(pos);
		struct intel_engine_cs *engine = barrier_to_engine(node);
		struct rb_node **p, *parent;

		spin_lock_irqsave_nested(&ref->tree_lock, flags,
					 SINGLE_DEPTH_NESTING);
		parent = NULL;
		p = &ref->tree.rb_node;
		while (*p) {
			struct active_node *it;

			parent = *p;

			it = rb_entry(parent, struct active_node, node);
			if (it->timeline < node->timeline)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&node->node, parent, p);
		rb_insert_color(&node->node, &ref->tree);
		spin_unlock_irqrestore(&ref->tree_lock, flags);

		GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
		llist_add(barrier_to_ll(node), &engine->barrier_tasks);
		intel_engine_pm_put(engine);
	}
}

static struct dma_fence **ll_to_fence_slot(struct llist_node *node)
{
	return __active_fence_slot(&barrier_from_ll(node)->base);
}

void i915_request_add_active_barriers(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct llist_node *node, *next;
	unsigned long flags;

	GEM_BUG_ON(!intel_context_is_barrier(rq->context));
	GEM_BUG_ON(intel_engine_is_virtual(engine));
	GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline);

	node = llist_del_all(&engine->barrier_tasks);
	if (!node)
		return;
	/*
	 * Attach the list of proto-fences to the in-flight request such
	 * that the parent i915_active will be released when this request
	 * is retired.
	 */
	spin_lock_irqsave(&rq->lock, flags);
	llist_for_each_safe(node, next, node) {
		/* serialise with reuse_idle_barrier */
		smp_store_mb(*ll_to_fence_slot(node), &rq->fence);
		list_add_tail((struct list_head *)node, &rq->fence.cb_list);
	}
	spin_unlock_irqrestore(&rq->lock, flags);
}

/*
 * __i915_active_fence_set: Update the last active fence along its timeline
 * @active: the active tracker
 * @fence: the new fence (under construction)
 *
 * Records the new @fence as the last active fence along its timeline in
 * this active tracker, moving the tracking callbacks from the previous
 * fence onto this one. Returns the previous fence (if not already completed),
 * which the caller must ensure is executed before the new fence. To ensure
 * that the order of fences within the timeline of the i915_active_fence is
 * understood, it should be locked by the caller.
 */
struct dma_fence *
__i915_active_fence_set(struct i915_active_fence *active,
			struct dma_fence *fence)
{
	struct dma_fence *prev;
	unsigned long flags;

	if (fence == rcu_access_pointer(active->fence))
		return fence;

	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));

	/*
	 * Consider that we have two threads arriving (A and B), with
	 * C already resident as the active->fence.
	 *
	 * A does the xchg first, and so it sees C or NULL depending
	 * on the timing of the interrupt handler. If it is NULL, the
	 * previous fence must have been signaled and we know that
	 * we are first on the timeline. If it is still present,
	 * we acquire the lock on that fence and serialise with the interrupt
	 * handler, in the process removing it from any future interrupt
	 * callback. A will then wait on C before executing (if present).
	 *
	 * As B is second, it sees A as the previous fence and so waits for
	 * it to complete its transition and takes over the occupancy for
	 * itself -- remembering that it needs to wait on A before executing.
	 *
	 * Note the strong ordering of the timeline also provides consistent
	 * nesting rules for the fence->lock; the inner lock is always the
	 * older lock.
	 */
	spin_lock_irqsave(fence->lock, flags);
	prev = xchg(__active_fence_slot(active), fence);
	if (prev) {
		GEM_BUG_ON(prev == fence);
		spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
		__list_del_entry(&active->cb.node);
		spin_unlock(prev->lock); /* serialise with prev->cb_list */
	}
	list_add_tail(&active->cb.node, &fence->cb_list);
	spin_unlock_irqrestore(fence->lock, flags);

	return prev;
}

int i915_active_fence_set(struct i915_active_fence *active,
			  struct i915_request *rq)
{
	struct dma_fence *fence;
	int err = 0;

	/* Must maintain timeline ordering wrt previous active requests */
	rcu_read_lock();
	fence = __i915_active_fence_set(active, &rq->fence);
	if (fence) /* but the previous fence may not belong to that timeline! */
		fence = dma_fence_get_rcu(fence);
	rcu_read_unlock();
	if (fence) {
		err = i915_request_await_dma_fence(rq, fence);
		dma_fence_put(fence);
	}

	return err;
}

void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	active_fence_cb(fence, cb);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_active.c"
#endif

static void i915_global_active_shrink(void)
{
	kmem_cache_shrink(global.slab_cache);
}

static void i915_global_active_exit(void)
{
	kmem_cache_destroy(global.slab_cache);
}

static struct i915_global_active global = { {
	.shrink = i915_global_active_shrink,
	.exit = i915_global_active_exit,
} };

int __init i915_global_active_init(void)
{
	global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
	if (!global.slab_cache)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}