i915_active.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/debugobjects.h>

#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_active.h"
#include "i915_globals.h"

/*
 * Active refs memory management
 *
 * To be more economical with memory, we reap all the i915_active trees as
 * they idle (when we know the active requests are inactive) and allocate the
 * nodes from a local slab cache to hopefully reduce the fragmentation.
 */
static struct i915_global_active {
	struct i915_global base;
	struct kmem_cache *slab_cache;
} global;

struct active_node {
	struct i915_active_fence base;
	struct i915_active *ref;
	struct rb_node node;
	u64 timeline;
};

static inline struct active_node *
node_from_active(struct i915_active_fence *active)
{
	return container_of(active, struct active_node, base);
}

#define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)

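/*
 * Idle barriers start life as "proto-nodes": until they are attached to a
 * request, the fence slot holds ERR_PTR(-EAGAIN) (see is_barrier()), the
 * dma_fence_cb list pointers are reused as the llist linkage for
 * engine->barrier_tasks, and the owning engine is stashed in cb.node.prev.
 * The helpers below convert between those overlapping views.
 */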
static inline bool is_barrier(const struct i915_active_fence *active)
{
	return IS_ERR(rcu_access_pointer(active->fence));
}

static inline struct llist_node *barrier_to_ll(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return (struct llist_node *)&node->base.cb.node;
}

static inline struct intel_engine_cs *
__barrier_to_engine(struct active_node *node)
{
	return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
}

static inline struct intel_engine_cs *
barrier_to_engine(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return __barrier_to_engine(node);
}

static inline struct active_node *barrier_from_ll(struct llist_node *x)
{
	return container_of((struct list_head *)x,
			    struct active_node, base.cb.node);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)

static void *active_debug_hint(void *addr)
{
	struct i915_active *ref = addr;

	return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
}

static struct debug_obj_descr active_debug_desc = {
	.name = "i915_active",
	.debug_hint = active_debug_hint,
};

static void debug_active_init(struct i915_active *ref)
{
	debug_object_init(ref, &active_debug_desc);
}

static void debug_active_activate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count)) /* before the first inc */
		debug_object_activate(ref, &active_debug_desc);
}

static void debug_active_deactivate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count)) /* after the last dec */
		debug_object_deactivate(ref, &active_debug_desc);
}

static void debug_active_fini(struct i915_active *ref)
{
	debug_object_free(ref, &active_debug_desc);
}

static void debug_active_assert(struct i915_active *ref)
{
	debug_object_assert_init(ref, &active_debug_desc);
}

#else

static inline void debug_active_init(struct i915_active *ref) { }
static inline void debug_active_activate(struct i915_active *ref) { }
static inline void debug_active_deactivate(struct i915_active *ref) { }
static inline void debug_active_fini(struct i915_active *ref) { }
static inline void debug_active_assert(struct i915_active *ref) { }

#endif

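/*
 * Called when the last reference is dropped: detach the rbtree of nodes
 * under the tree_lock, invoke the optional retire callback, wake anyone
 * sleeping in i915_active_wait() and return the nodes to the slab cache.
 */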
static void
__active_retire(struct i915_active *ref)
{
	struct active_node *it, *n;
	struct rb_root root;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* return the unused nodes to our slabcache -- flushing the allocator */
	if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
		return;

	GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
	debug_active_deactivate(ref);

	root = ref->tree;
	ref->tree = RB_ROOT;
	ref->cache = NULL;

	spin_unlock_irqrestore(&ref->tree_lock, flags);

	/* After the final retire, the entire struct may be freed */
	if (ref->retire)
		ref->retire(ref);

	/* ... except if you wait on it, you must manage your own references! */
	wake_up_var(ref);

	rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
		GEM_BUG_ON(i915_active_fence_isset(&it->base));
		kmem_cache_free(global.slab_cache, it);
	}
}

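/*
 * When the retire callback may sleep (I915_ACTIVE_RETIRE_SLEEPS),
 * active_retire() defers the final release to this worker on
 * system_unbound_wq instead of retiring in the current context.
 */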
static void
active_work(struct work_struct *wrk)
{
	struct i915_active *ref = container_of(wrk, typeof(*ref), work);

	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	__active_retire(ref);
}

static void
active_retire(struct i915_active *ref)
{
	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
		queue_work(system_unbound_wq, &ref->work);
		return;
	}

	__active_retire(ref);
}

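/*
 * The fence callback clears the slot with cmpxchg() so that it only
 * retires the tracker if the slot still points at the signaled fence;
 * a racing __i915_active_fence_set() that has already installed a new
 * fence wins, and the reference is carried over to that fence.
 */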
static inline struct dma_fence **
__active_fence_slot(struct i915_active_fence *active)
{
	return (struct dma_fence ** __force)&active->fence;
}

static inline bool
active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct i915_active_fence *active =
		container_of(cb, typeof(*active), cb);

	return cmpxchg(__active_fence_slot(active), fence, NULL) == fence;
}

static void
node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct active_node, base.cb)->ref);
}

static void
excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct i915_active, excl.cb));
}

static struct i915_active_fence *
active_instance(struct i915_active *ref, struct intel_timeline *tl)
{
	struct active_node *node, *prealloc;
	struct rb_node **p, *parent;
	u64 idx = tl->fence_context;

	/*
	 * We track the most recently used timeline to skip a rbtree search
	 * for the common case, under typical loads we never need the rbtree
	 * at all. We can reuse the last slot if it is empty, that is
	 * after the previous activity has been retired, or if it matches the
	 * current timeline.
	 */
	node = READ_ONCE(ref->cache);
	if (node && node->timeline == idx)
		return &node->base;

	/* Preallocate a replacement, just in case */
	prealloc = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
	if (!prealloc)
		return NULL;

	spin_lock_irq(&ref->tree_lock);
	GEM_BUG_ON(i915_active_is_idle(ref));

	parent = NULL;
	p = &ref->tree.rb_node;
	while (*p) {
		parent = *p;

		node = rb_entry(parent, struct active_node, node);
		if (node->timeline == idx) {
			kmem_cache_free(global.slab_cache, prealloc);
			goto out;
		}

		if (node->timeline < idx)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	node = prealloc;
	__i915_active_fence_init(&node->base, NULL, node_retire);
	node->ref = ref;
	node->timeline = idx;

	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, &ref->tree);

out:
	ref->cache = node;
	spin_unlock_irq(&ref->tree_lock);

	BUILD_BUG_ON(offsetof(typeof(*node), base));
	return &node->base;
}

void __i915_active_init(struct i915_active *ref,
			int (*active)(struct i915_active *ref),
			void (*retire)(struct i915_active *ref),
			struct lock_class_key *mkey,
			struct lock_class_key *wkey)
{
	unsigned long bits;

	debug_active_init(ref);

	ref->flags = 0;
	ref->active = active;
	ref->retire = ptr_unpack_bits(retire, &bits, 2);
	if (bits & I915_ACTIVE_MAY_SLEEP)
		ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;

	spin_lock_init(&ref->tree_lock);
	ref->tree = RB_ROOT;
	ref->cache = NULL;

	init_llist_head(&ref->preallocated_barriers);
	atomic_set(&ref->count, 0);
	__mutex_init(&ref->mutex, "i915_active", mkey);
	__i915_active_fence_init(&ref->excl, NULL, excl_retire);
	INIT_WORK(&ref->work, active_work);
#if IS_ENABLED(CONFIG_LOCKDEP)
	lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0);
#endif
}

static bool ____active_del_barrier(struct i915_active *ref,
				   struct active_node *node,
				   struct intel_engine_cs *engine)
{
	struct llist_node *head = NULL, *tail = NULL;
	struct llist_node *pos, *next;

	GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);

	/*
	 * Rebuild the llist excluding our node. We may perform this
	 * outside of the kernel_context timeline mutex and so someone
	 * else may be manipulating the engine->barrier_tasks, in
	 * which case either we or they will be upset :)
	 *
	 * A second __active_del_barrier() will report failure to claim
	 * the active_node and the caller will just shrug and know not to
	 * claim ownership of its node.
	 *
	 * A concurrent i915_request_add_active_barriers() will miss adding
	 * any of the tasks, but we will try again on the next -- and since
	 * we are actively using the barrier, we know that there will be
	 * at least another opportunity when we idle.
	 */
	llist_for_each_safe(pos, next, llist_del_all(&engine->barrier_tasks)) {
		if (node == barrier_from_ll(pos)) {
			node = NULL;
			continue;
		}

		pos->next = head;
		head = pos;
		if (!tail)
			tail = pos;
	}
	if (head)
		llist_add_batch(head, tail, &engine->barrier_tasks);

	return !node;
}

static bool
__active_del_barrier(struct i915_active *ref, struct active_node *node)
{
	return ____active_del_barrier(ref, node, barrier_to_engine(node));
}

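/*
 * Record @fence as the most recent activity on @tl within this tracker.
 * If the slot currently holds an idle-barrier proto-node, the pending
 * barrier is cancelled and replaced by the new fence.
 */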
int i915_active_ref(struct i915_active *ref,
		    struct intel_timeline *tl,
		    struct dma_fence *fence)
{
	struct i915_active_fence *active;
	int err;

	lockdep_assert_held(&tl->mutex);

	/* Prevent reaping in case we malloc/wait while building the tree */
	err = i915_active_acquire(ref);
	if (err)
		return err;

	active = active_instance(ref, tl);
	if (!active) {
		err = -ENOMEM;
		goto out;
	}

	if (is_barrier(active)) { /* proto-node used by our idle barrier */
		/*
		 * This request is on the kernel_context timeline, and so
		 * we can use it to substitute for the pending idle-barrier
		 * request that we want to emit on the kernel_context.
		 */
		__active_del_barrier(ref, node_from_active(active));
		RCU_INIT_POINTER(active->fence, NULL);
		atomic_dec(&ref->count);
	}
	if (!__i915_active_fence_set(active, fence))
		atomic_inc(&ref->count);

out:
	i915_active_release(ref);
	return err;
}

void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
{
	/* We expect the caller to manage the exclusive timeline ordering */
	GEM_BUG_ON(i915_active_is_idle(ref));

	if (!__i915_active_fence_set(&ref->excl, f))
		atomic_inc(&ref->count);
}

bool i915_active_acquire_if_busy(struct i915_active *ref)
{
	debug_active_assert(ref);
	return atomic_add_unless(&ref->count, 1, 0);
}

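/*
 * Acquiring the first reference (the idle -> active transition) is
 * serialised by ref->mutex, so that the optional ref->active() hook runs
 * once per transition before the count becomes non-zero. Later references
 * take the lockless i915_active_acquire_if_busy() fast path.
 */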
int i915_active_acquire(struct i915_active *ref)
{
	int err;

	if (i915_active_acquire_if_busy(ref))
		return 0;

	err = mutex_lock_interruptible(&ref->mutex);
	if (err)
		return err;

	if (!atomic_read(&ref->count) && ref->active)
		err = ref->active(ref);
	if (!err) {
		spin_lock_irq(&ref->tree_lock); /* vs __active_retire() */
		debug_active_activate(ref);
		atomic_inc(&ref->count);
		spin_unlock_irq(&ref->tree_lock);
	}

	mutex_unlock(&ref->mutex);

	return err;
}

void i915_active_release(struct i915_active *ref)
{
	debug_active_assert(ref);
	active_retire(ref);
}

static void enable_signaling(struct i915_active_fence *active)
{
	struct dma_fence *fence;

	fence = i915_active_fence_get(active);
	if (!fence)
		return;

	dma_fence_enable_sw_signaling(fence);
	dma_fence_put(fence);
}

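/*
 * Flush the lazy signaling on all tracked fences and then sleep until the
 * last reference is retired (__active_retire() wakes us via wake_up_var()).
 */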
int i915_active_wait(struct i915_active *ref)
{
	struct active_node *it, *n;
	int err = 0;

	might_sleep();

	if (!i915_active_acquire_if_busy(ref))
		return 0;

	/* Flush lazy signals */
	enable_signaling(&ref->excl);
	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		if (is_barrier(&it->base)) /* unconnected idle barrier */
			continue;

		enable_signaling(&it->base);
	}
	/* Any fence added after the wait begins will not be auto-signaled */

	i915_active_release(ref);
	if (err)
		return err;

	if (wait_var_event_interruptible(ref, i915_active_is_idle(ref)))
		return -EINTR;

	flush_work(&ref->work);
	return 0;
}

int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
{
	int err = 0;

	if (rcu_access_pointer(ref->excl.fence)) {
		struct dma_fence *fence;

		rcu_read_lock();
		fence = dma_fence_get_rcu_safe(&ref->excl.fence);
		rcu_read_unlock();
		if (fence) {
			err = i915_request_await_dma_fence(rq, fence);
			dma_fence_put(fence);
		}
	}

	/* In the future we may choose to await on all fences */

	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
void i915_active_fini(struct i915_active *ref)
{
	debug_active_fini(ref);
	GEM_BUG_ON(atomic_read(&ref->count));
	GEM_BUG_ON(work_pending(&ref->work));
	GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
	mutex_destroy(&ref->mutex);
}
#endif

static inline bool is_idle_barrier(struct active_node *node, u64 idx)
{
	return node->timeline == idx && !i915_active_fence_isset(&node->base);
}

static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
{
	struct rb_node *prev, *p;

	if (RB_EMPTY_ROOT(&ref->tree))
		return NULL;

	spin_lock_irq(&ref->tree_lock);
	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Try to reuse any existing barrier nodes already allocated for this
	 * i915_active, due to overlapping active phases there is likely a
	 * node kept alive (as we reuse before parking). We prefer to reuse
	 * completely idle barriers (less hassle in manipulating the llists),
	 * but otherwise any will do.
	 */
	if (ref->cache && is_idle_barrier(ref->cache, idx)) {
		p = &ref->cache->node;
		goto match;
	}

	prev = NULL;
	p = ref->tree.rb_node;
	while (p) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);

		if (is_idle_barrier(node, idx))
			goto match;

		prev = p;
		if (node->timeline < idx)
			p = p->rb_right;
		else
			p = p->rb_left;
	}

	/*
	 * No quick match, but we did find the leftmost rb_node for the
	 * kernel_context. Walk the rb_tree in-order to see if there were
	 * any idle-barriers on this timeline that we missed, or just use
	 * the first pending barrier.
	 */
	for (p = prev; p; p = rb_next(p)) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);
		struct intel_engine_cs *engine;

		if (node->timeline > idx)
			break;

		if (node->timeline < idx)
			continue;

		if (is_idle_barrier(node, idx))
			goto match;

		/*
		 * The list of pending barriers is protected by the
		 * kernel_context timeline, which notably we do not hold
		 * here. i915_request_add_active_barriers() may consume
		 * the barrier before we claim it, so we have to check
		 * for success.
		 */
		engine = __barrier_to_engine(node);
		smp_rmb(); /* serialise with add_active_barriers */
		if (is_barrier(&node->base) &&
		    ____active_del_barrier(ref, node, engine))
			goto match;
	}

	spin_unlock_irq(&ref->tree_lock);

	return NULL;

match:
	rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
	if (p == &ref->cache->node)
		ref->cache = NULL;
	spin_unlock_irq(&ref->tree_lock);

	return rb_entry(p, struct active_node, node);
}

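/*
 * Idle barriers are attached in three steps:
 * 1. i915_active_acquire_preallocate_barrier() allocates (or reuses) a
 *    proto-node per physical engine and holds engine-pm wakerefs.
 * 2. i915_active_acquire_barrier() inserts the proto-nodes into the rbtree
 *    and queues them on engine->barrier_tasks.
 * 3. i915_request_add_active_barriers() attaches the queued barriers to the
 *    next request on the engine's kernel_context, which releases the
 *    i915_active when that request is retired.
 */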
int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
					    struct intel_engine_cs *engine)
{
	intel_engine_mask_t tmp, mask = engine->mask;
	struct llist_node *pos = NULL, *next;
	struct intel_gt *gt = engine->gt;
	int err;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* Wait until the previous preallocation is completed */
	while (!llist_empty(&ref->preallocated_barriers))
		cond_resched();

	/*
	 * Preallocate a node for each physical engine supporting the target
	 * engine (remember virtual engines have more than one sibling).
	 * We can then use the preallocated nodes in
	 * i915_active_acquire_barrier()
	 */
	for_each_engine_masked(engine, gt, mask, tmp) {
		u64 idx = engine->kernel_context->timeline->fence_context;
		struct active_node *node;

		node = reuse_idle_barrier(ref, idx);
		if (!node) {
			node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
			if (!node) {
				err = -ENOMEM;
				goto unwind;
			}

			RCU_INIT_POINTER(node->base.fence, NULL);
			node->base.cb.func = node_retire;
			node->timeline = idx;
			node->ref = ref;
		}

		if (!i915_active_fence_isset(&node->base)) {
			/*
			 * Mark this as being *our* unconnected proto-node.
			 *
			 * Since this node is not in any list, and we have
			 * decoupled it from the rbtree, we can reuse the
			 * request to indicate this is an idle-barrier node
			 * and then we can use the rb_node and list pointers
			 * for our tracking of the pending barrier.
			 */
			RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
			node->base.cb.node.prev = (void *)engine;
			atomic_inc(&ref->count);
		}
		GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));

		GEM_BUG_ON(barrier_to_engine(node) != engine);
		next = barrier_to_ll(node);
		next->next = pos;
		if (!pos)
			pos = next;
		intel_engine_pm_get(engine);
	}

	GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
	llist_add_batch(next, pos, &ref->preallocated_barriers);

	return 0;

unwind:
	while (pos) {
		struct active_node *node = barrier_from_ll(pos);

		pos = pos->next;

		atomic_dec(&ref->count);
		intel_engine_pm_put(barrier_to_engine(node));

		kmem_cache_free(global.slab_cache, node);
	}
	return err;
}

void i915_active_acquire_barrier(struct i915_active *ref)
{
	struct llist_node *pos, *next;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Transfer the list of preallocated barriers into the
	 * i915_active rbtree, but only as proto-nodes. They will be
	 * populated by i915_request_add_active_barriers() to point to the
	 * request that will eventually release them.
	 */
	llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
		struct active_node *node = barrier_from_ll(pos);
		struct intel_engine_cs *engine = barrier_to_engine(node);
		struct rb_node **p, *parent;

		spin_lock_irqsave_nested(&ref->tree_lock, flags,
					 SINGLE_DEPTH_NESTING);
		parent = NULL;
		p = &ref->tree.rb_node;
		while (*p) {
			struct active_node *it;

			parent = *p;

			it = rb_entry(parent, struct active_node, node);
			if (it->timeline < node->timeline)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&node->node, parent, p);
		rb_insert_color(&node->node, &ref->tree);
		spin_unlock_irqrestore(&ref->tree_lock, flags);

		GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
		llist_add(barrier_to_ll(node), &engine->barrier_tasks);
		intel_engine_pm_put(engine);
	}
}

static struct dma_fence **ll_to_fence_slot(struct llist_node *node)
{
	return __active_fence_slot(&barrier_from_ll(node)->base);
}

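/*
 * Final step of the barrier transfer: hand every barrier queued on this
 * engine over to @rq, which must be a request on the engine's
 * kernel_context timeline.
 */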
void i915_request_add_active_barriers(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct llist_node *node, *next;
	unsigned long flags;

	GEM_BUG_ON(!intel_context_is_barrier(rq->context));
	GEM_BUG_ON(intel_engine_is_virtual(engine));
	GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline);

	node = llist_del_all(&engine->barrier_tasks);
	if (!node)
		return;
	/*
	 * Attach the list of proto-fences to the in-flight request such
	 * that the parent i915_active will be released when this request
	 * is retired.
	 */
	spin_lock_irqsave(&rq->lock, flags);
	llist_for_each_safe(node, next, node) {
		/* serialise with reuse_idle_barrier */
		smp_store_mb(*ll_to_fence_slot(node), &rq->fence);
		list_add_tail((struct list_head *)node, &rq->fence.cb_list);
	}
	spin_unlock_irqrestore(&rq->lock, flags);
}

/*
 * __i915_active_fence_set: Update the last active fence along its timeline
 * @active: the active tracker
 * @fence: the new fence (under construction)
 *
 * Records the new @fence as the last active fence along its timeline in
 * this active tracker, moving the tracking callbacks from the previous
 * fence onto this one. Returns the previous fence (if not already completed),
 * which the caller must ensure is executed before the new fence. To ensure
 * that the order of fences within the timeline of the i915_active_fence is
 * understood, it should be locked by the caller.
 */
struct dma_fence *
__i915_active_fence_set(struct i915_active_fence *active,
			struct dma_fence *fence)
{
	struct dma_fence *prev;
	unsigned long flags;

	if (fence == rcu_access_pointer(active->fence))
		return fence;

	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));

	/*
	 * Consider that we have two threads arriving (A and B), with
	 * C already resident as the active->fence.
	 *
	 * A does the xchg first, and so it sees C or NULL depending
	 * on the timing of the interrupt handler. If it is NULL, the
	 * previous fence must have been signaled and we know that
	 * we are first on the timeline. If it is still present,
	 * we acquire the lock on that fence and serialise with the interrupt
	 * handler, in the process removing it from any future interrupt
	 * callback. A will then wait on C before executing (if present).
	 *
	 * As B is second, it sees A as the previous fence and so waits for
	 * it to complete its transition and takes over the occupancy for
	 * itself -- remembering that it needs to wait on A before executing.
	 *
	 * Note the strong ordering of the timeline also provides consistent
	 * nesting rules for the fence->lock; the inner lock is always the
	 * older lock.
	 */
	spin_lock_irqsave(fence->lock, flags);
	prev = xchg(__active_fence_slot(active), fence);
	if (prev) {
		GEM_BUG_ON(prev == fence);
		spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
		__list_del_entry(&active->cb.node);
		spin_unlock(prev->lock); /* serialise with prev->cb_list */
	}
	GEM_BUG_ON(rcu_access_pointer(active->fence) != fence);
	list_add_tail(&active->cb.node, &fence->cb_list);
	spin_unlock_irqrestore(fence->lock, flags);

	return prev;
}

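/*
 * Like __i915_active_fence_set(), but additionally orders @rq after the
 * fence it replaces so that timeline ordering is preserved by the request.
 */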
int i915_active_fence_set(struct i915_active_fence *active,
			  struct i915_request *rq)
{
	struct dma_fence *fence;
	int err = 0;

	/* Must maintain timeline ordering wrt previous active requests */
	rcu_read_lock();
	fence = __i915_active_fence_set(active, &rq->fence);
	if (fence) /* but the previous fence may not belong to that timeline! */
		fence = dma_fence_get_rcu(fence);
	rcu_read_unlock();
	if (fence) {
		err = i915_request_await_dma_fence(rq, fence);
		dma_fence_put(fence);
	}

	return err;
}

void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	active_fence_cb(fence, cb);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_active.c"
#endif

static void i915_global_active_shrink(void)
{
	kmem_cache_shrink(global.slab_cache);
}

static void i915_global_active_exit(void)
{
	kmem_cache_destroy(global.slab_cache);
}

static struct i915_global_active global = { {
	.shrink = i915_global_active_shrink,
	.exit = i915_global_active_exit,
} };

int __init i915_global_active_init(void)
{
	global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
	if (!global.slab_cache)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}