/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>

#include "i915_drv.h"

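/*
 * Breadcrumbs track the tasks waiting for an engine's seqno to advance.
 * Waiters (struct intel_wait) live in a per-engine, seqno-ordered rbtree;
 * the oldest waiter acts as the "bottom-half" and is the only task woken
 * directly from the user interrupt. It performs the coherent seqno check
 * and then wakes any other completed waiters. A per-engine signaler
 * kthread reuses the same machinery to deliver dma-fence signals.
 *
 * A waiter roughly follows this pattern (a sketch only; the real loop,
 * with all its extra checks, lives in i915_request_wait()). If
 * intel_engine_add_wait() returns true, the caller should perform a
 * coherent seqno check before sleeping:
 *
 *	struct intel_wait wait = { .tsk = current, .seqno = seqno };
 *
 *	intel_engine_add_wait(engine, &wait);
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (intel_engine_signaled(engine, wait.seqno))
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *	intel_engine_remove_wait(engine, &wait);
 */

/* A task is "asleep" if it is in a sleeping state and not on a runqueue. */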
#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_rq)

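/*
 * Wake the current bottom-half, if any. Returns ENGINE_WAKEUP_WAITER if a
 * waiter was present, or'ed with ENGINE_WAKEUP_ASLEEP if that waiter also
 * appeared to be asleep. Caller must hold b->irq_lock.
 */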
static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
{
	struct intel_wait *wait;
	unsigned int result = 0;

	lockdep_assert_held(&b->irq_lock);

	wait = b->irq_wait;
	if (wait) {
		/*
		 * N.B. Since task_asleep() and ttwu are not atomic, the
		 * waiter may actually go to sleep after the check, causing
		 * us to suppress a valid wakeup. We prefer to reduce the
		 * number of false positive missed_breadcrumb() warnings
		 * at the expense of a few false negatives, as it is easy
		 * to trigger a false positive under heavy load. Enough
		 * signal should remain from genuine missed_breadcrumb()
		 * for us to detect in CI.
		 */
		bool was_asleep = task_asleep(wait->tsk);

		result = ENGINE_WAKEUP_WAITER;
		if (wake_up_process(wait->tsk) && was_asleep)
			result |= ENGINE_WAKEUP_ASLEEP;
	}

	return result;
}

unsigned int intel_engine_wakeup(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	unsigned long flags;
	unsigned int result;

	spin_lock_irqsave(&b->irq_lock, flags);
	result = __intel_breadcrumbs_wakeup(b);
	spin_unlock_irqrestore(&b->irq_lock, flags);

	return result;
}

static unsigned long wait_timeout(void)
{
	return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
}

static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
{
	if (GEM_SHOW_DEBUG()) {
		struct drm_printer p = drm_debug_printer(__func__);

		intel_engine_dump(engine, &p,
				  "%s missed breadcrumb at %pS\n",
				  engine->name, __builtin_return_address(0));
	}

	set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}

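/*
 * Timer callback: if no interrupt has arrived since the last check and the
 * bottom-half is found asleep, report a missed breadcrumb and fall back to
 * the fake-irq timer; otherwise rearm ourselves for another interval.
 */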
static void intel_breadcrumbs_hangcheck(struct timer_list *t)
{
	struct intel_engine_cs *engine =
		from_timer(engine, t, breadcrumbs.hangcheck);
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	unsigned int irq_count;

	if (!b->irq_armed)
		return;

	irq_count = READ_ONCE(b->irq_count);
	if (b->hangcheck_interrupts != irq_count) {
		b->hangcheck_interrupts = irq_count;
		mod_timer(&b->hangcheck, wait_timeout());
		return;
	}

	/* We keep the hangcheck timer alive until we disarm the irq, even
	 * if there are no waiters at present.
	 *
	 * If the waiter was currently running, assume it hasn't had a chance
	 * to process the pending interrupt (e.g., low priority task on a loaded
	 * system) and wait until it sleeps before declaring a missed interrupt.
	 *
	 * If the waiter was asleep (and not even pending a wakeup), then we
	 * must have missed an interrupt as the GPU has stopped advancing
	 * but we still have a waiter. Assuming all batches complete within
	 * DRM_I915_HANGCHECK_JIFFIES [1.5s]!
	 */
	if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP) {
		missed_breadcrumb(engine);
		mod_timer(&b->fake_irq, jiffies + 1);
	} else {
		mod_timer(&b->hangcheck, wait_timeout());
	}
}

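/*
 * Timer callback that stands in for the user interrupt: poke the
 * bottom-half so that it performs the coherent seqno check itself.
 */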
static void intel_breadcrumbs_fake_irq(struct timer_list *t)
{
	struct intel_engine_cs *engine =
		from_timer(engine, t, breadcrumbs.fake_irq);
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/*
	 * The timer persists in case we cannot enable interrupts,
	 * or if we have previously seen seqno/interrupt incoherency
	 * ("missed interrupt" syndrome, better known as a "missed breadcrumb").
	 * Here the worker will wake up every jiffie in order to kick the
	 * oldest waiter to do the coherent seqno check.
	 */

	spin_lock_irq(&b->irq_lock);
	if (b->irq_armed && !__intel_breadcrumbs_wakeup(b))
		__intel_engine_disarm_breadcrumbs(engine);
	spin_unlock_irq(&b->irq_lock);
	if (!b->irq_armed)
		return;

	/* If the user has disabled the fake-irq, restore the hangchecking */
	if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings)) {
		mod_timer(&b->hangcheck, wait_timeout());
		return;
	}

	mod_timer(&b->fake_irq, jiffies + 1);
}

static void irq_enable(struct intel_engine_cs *engine)
{
	if (!engine->irq_enable)
		return;

	/* Caller disables interrupts */
	spin_lock(&engine->i915->irq_lock);
	engine->irq_enable(engine);
	spin_unlock(&engine->i915->irq_lock);
}

static void irq_disable(struct intel_engine_cs *engine)
{
	if (!engine->irq_disable)
		return;

	/* Caller disables interrupts */
	spin_lock(&engine->i915->irq_lock);
	engine->irq_disable(engine);
	spin_unlock(&engine->i915->irq_lock);
}

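/*
 * Drop the irq reference taken for the waiters and mark the breadcrumbs as
 * disarmed. Caller must hold b->irq_lock, and there must be no waiter left.
 */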
void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	lockdep_assert_held(&b->irq_lock);
	GEM_BUG_ON(b->irq_wait);
	GEM_BUG_ON(!b->irq_armed);

	GEM_BUG_ON(!b->irq_enabled);
	if (!--b->irq_enabled)
		irq_disable(engine);

	b->irq_armed = false;
}

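/*
 * Take an extra reference on the user interrupt so that it stays enabled
 * even without waiters; paired with intel_engine_unpin_breadcrumbs_irq().
 */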
void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	spin_lock_irq(&b->irq_lock);
	if (!b->irq_enabled++)
		irq_enable(engine);
	GEM_BUG_ON(!b->irq_enabled); /* no overflow! */
	spin_unlock_irq(&b->irq_lock);
}

void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	spin_lock_irq(&b->irq_lock);
	GEM_BUG_ON(!b->irq_enabled); /* no underflow! */
	if (!--b->irq_enabled)
		irq_disable(engine);
	spin_unlock_irq(&b->irq_lock);
}

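/*
 * Called when the engine is idled: all requests must have completed, so kick
 * any waiter that remains (flagging a missed breadcrumb if it was asleep),
 * disable the user interrupt and empty the waiter tree.
 */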
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct intel_wait *wait, *n;

	if (!b->irq_armed)
		return;

	/*
	 * We only disarm the irq when we are idle (all requests completed),
	 * so if the bottom-half remains asleep, it missed the request
	 * completion.
	 */
	if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP)
		missed_breadcrumb(engine);

	spin_lock_irq(&b->rb_lock);

	spin_lock(&b->irq_lock);
	b->irq_wait = NULL;
	if (b->irq_armed)
		__intel_engine_disarm_breadcrumbs(engine);
	spin_unlock(&b->irq_lock);

	rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) {
		GEM_BUG_ON(!intel_engine_signaled(engine, wait->seqno));
		RB_CLEAR_NODE(&wait->node);
		wake_up_process(wait->tsk);
	}
	b->waiters = RB_ROOT;

	spin_unlock_irq(&b->rb_lock);
}

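/*
 * Should we rely on the timer-driven fake irq rather than the user
 * interrupt? Only if we have previously flagged a missed interrupt and no
 * new interrupts have been seen since the last hangcheck sample.
 */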
static bool use_fake_irq(const struct intel_breadcrumbs *b)
{
	const struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);

	if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings))
		return false;

	/*
	 * Only start with the heavy weight fake irq timer if we have not
	 * seen any interrupts since enabling it the first time. If the
	 * interrupts are still arriving, it means we made a mistake in our
	 * engine->seqno_barrier(), a timing error that should be transient
	 * and unlikely to reoccur.
	 */
	return READ_ONCE(b->irq_count) == b->hangcheck_interrupts;
}

static void enable_fake_irq(struct intel_breadcrumbs *b)
{
	/* Ensure we never sleep indefinitely */
	if (!b->irq_enabled || use_fake_irq(b))
		mod_timer(&b->fake_irq, jiffies + 1);
	else
		mod_timer(&b->hangcheck, wait_timeout());
}

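/*
 * Arm the breadcrumb interrupt on behalf of the first waiter. Returns true
 * if the user interrupt was freshly enabled, in which case the caller must
 * perform a coherent seqno check to close the race against an interrupt
 * raised while it was being enabled. Caller must hold b->irq_lock.
 */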
static bool __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
{
	struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);
	struct drm_i915_private *i915 = engine->i915;
	bool enabled;

	lockdep_assert_held(&b->irq_lock);
	if (b->irq_armed)
		return false;

	/*
	 * The breadcrumb irq will be disarmed on the interrupt after the
	 * waiters are signaled. This gives us a single interrupt window in
	 * which we can add a new waiter and avoid the cost of re-enabling
	 * the irq.
	 */
	b->irq_armed = true;

	/*
	 * Since we are waiting on a request, the GPU should be busy
	 * and should have its own rpm reference. This is tracked by
	 * i915->gt.awake, so we can forgo holding our own wakeref for the
	 * interrupt: we disarm the breadcrumbs before i915->gt.awake is
	 * released (when the driver is idle).
	 */

	/* No interrupts? Kick the waiter every jiffie! */
	enabled = false;
	if (!b->irq_enabled++ &&
	    !test_bit(engine->id, &i915->gpu_error.test_irq_rings)) {
		irq_enable(engine);
		enabled = true;
	}

	enable_fake_irq(b);
	return enabled;
}

static inline struct intel_wait *to_wait(struct rb_node *node)
{
	return rb_entry(node, struct intel_wait, node);
}

static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
					      struct intel_wait *wait)
{
	lockdep_assert_held(&b->rb_lock);
	GEM_BUG_ON(b->irq_wait == wait);

	/*
	 * This request is completed, so remove it from the tree, mark it as
	 * complete, and *then* wake up the associated task. N.B. when the
	 * task wakes up, it will find the empty rb_node, discern that it
	 * has already been removed from the tree and skip the serialisation
	 * of the b->rb_lock and b->irq_lock. This means that the destruction
	 * of the intel_wait is not serialised with the interrupt handler
	 * by the waiter - it must instead be serialised by the caller.
	 */
	rb_erase(&wait->node, &b->waiters);
	RB_CLEAR_NODE(&wait->node);

	if (wait->tsk->state != TASK_RUNNING)
		wake_up_process(wait->tsk); /* implicit smp_wmb() */
}

static inline void __intel_breadcrumbs_next(struct intel_engine_cs *engine,
					    struct rb_node *next)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	spin_lock(&b->irq_lock);
	GEM_BUG_ON(!b->irq_armed);
	GEM_BUG_ON(!b->irq_wait);
	b->irq_wait = to_wait(next);
	spin_unlock(&b->irq_lock);

	/* We always wake up the next waiter that takes over as the bottom-half
	 * as we may delegate not only the irq-seqno barrier to the next waiter
	 * but also the task of waking up concurrent waiters.
	 */
	if (next)
		wake_up_process(to_wait(next)->tsk);
}

static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
				    struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node **p, *parent, *completed;
	bool first, armed;
	u32 seqno;

	GEM_BUG_ON(!wait->seqno);

	/* Insert the request into the retirement ordered list
	 * of waiters by walking the rbtree. If we are the oldest
	 * seqno in the tree (the first to be retired), then
	 * set ourselves as the bottom-half.
	 *
	 * As we descend the tree, prune completed branches: since we hold the
	 * spinlock, we know that the first_waiter must be delayed and we can
	 * reduce some of the sequential wake up latency if we take action
	 * ourselves and wake up the completed tasks in parallel. Also, by
	 * removing stale elements in the tree, we may be able to reduce the
	 * ping-pong between the old bottom-half and ourselves as first-waiter.
	 */
	armed = false;
	first = true;
	parent = NULL;
	completed = NULL;
	seqno = intel_engine_get_seqno(engine);

	 /* If the request completed before we managed to grab the spinlock,
	  * return now before adding ourselves to the rbtree. We let the
	  * current bottom-half handle any pending wakeups and instead
	  * try and get out of the way quickly.
	  */
	if (i915_seqno_passed(seqno, wait->seqno)) {
		RB_CLEAR_NODE(&wait->node);
		return first;
	}

	p = &b->waiters.rb_node;
	while (*p) {
		parent = *p;
		if (wait->seqno == to_wait(parent)->seqno) {
			/* We have multiple waiters on the same seqno, select
			 * the highest priority task (that with the smallest
			 * task->prio) to serve as the bottom-half for this
			 * group.
			 */
			if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
				p = &parent->rb_right;
				first = false;
			} else {
				p = &parent->rb_left;
			}
		} else if (i915_seqno_passed(wait->seqno,
					     to_wait(parent)->seqno)) {
			p = &parent->rb_right;
			if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
				completed = parent;
			else
				first = false;
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&wait->node, parent, p);
	rb_insert_color(&wait->node, &b->waiters);

	if (first) {
		spin_lock(&b->irq_lock);
		b->irq_wait = wait;
		/* After assigning ourselves as the new bottom-half, we must
		 * perform a cursory check to prevent a missed interrupt.
		 * Either we miss the interrupt whilst programming the hardware,
		 * or if there was a previous waiter (for a later seqno) they
		 * may be woken instead of us (due to the inherent race
		 * in the unlocked read of b->irq_seqno_bh in the irq handler)
		 * and so we miss the wake up.
		 */
		armed = __intel_breadcrumbs_enable_irq(b);
		spin_unlock(&b->irq_lock);
	}

	if (completed) {
		/* Advance the bottom-half (b->irq_wait) before we wake up
		 * the waiters who may scribble over their intel_wait
		 * just as the interrupt handler is dereferencing it via
		 * b->irq_wait.
		 */
		if (!first) {
			struct rb_node *next = rb_next(completed);

			GEM_BUG_ON(next == &wait->node);
			__intel_breadcrumbs_next(engine, next);
		}

		do {
			struct intel_wait *crumb = to_wait(completed);
			completed = rb_prev(completed);
			__intel_breadcrumbs_finish(b, crumb);
		} while (completed);
	}

	GEM_BUG_ON(!b->irq_wait);
	GEM_BUG_ON(!b->irq_armed);
	GEM_BUG_ON(rb_first(&b->waiters) != &b->irq_wait->node);

	return armed;
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	bool armed;

	spin_lock_irq(&b->rb_lock);
	armed = __intel_engine_add_wait(engine, wait);
	spin_unlock_irq(&b->rb_lock);
	if (armed)
		return armed;

	/* Make the caller recheck if its request has already started. */
	return intel_engine_has_started(engine, wait->seqno);
}

static inline bool chain_wakeup(struct rb_node *rb, int priority)
{
	return rb && to_wait(rb)->tsk->prio <= priority;
}

static inline int wakeup_priority(struct intel_breadcrumbs *b,
				  struct task_struct *tsk)
{
	if (tsk == b->signaler)
		return INT_MIN;
	else
		return tsk->prio;
}

static void __intel_engine_remove_wait(struct intel_engine_cs *engine,
				       struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	lockdep_assert_held(&b->rb_lock);

	if (RB_EMPTY_NODE(&wait->node))
		goto out;

	if (b->irq_wait == wait) {
		const int priority = wakeup_priority(b, wait->tsk);
		struct rb_node *next;

		/* We are the current bottom-half. Find the next candidate,
		 * the first waiter in the queue on the remaining oldest
		 * request. As multiple seqnos may complete in the time it
		 * takes us to wake up and find the next waiter, we have to
		 * wake up that waiter for it to perform its own coherent
		 * completion check.
		 */
		next = rb_next(&wait->node);
		if (chain_wakeup(next, priority)) {
			/* If the next waiter is already complete,
			 * wake it up and continue on to the next waiter. So
			 * if we have a small herd, they will wake up in parallel
			 * rather than sequentially, which should reduce
			 * the overall latency in waking all the completed
			 * clients.
			 *
			 * However, waking up a chain adds extra latency to
			 * the first_waiter. This is undesirable if that
			 * waiter is a high priority task.
			 */
			u32 seqno = intel_engine_get_seqno(engine);

			while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
				struct rb_node *n = rb_next(next);

				__intel_breadcrumbs_finish(b, to_wait(next));
				next = n;
				if (!chain_wakeup(next, priority))
					break;
			}
		}

		__intel_breadcrumbs_next(engine, next);
	} else {
		GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
	}

	GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
	rb_erase(&wait->node, &b->waiters);
	RB_CLEAR_NODE(&wait->node);

out:
	GEM_BUG_ON(b->irq_wait == wait);
	GEM_BUG_ON(rb_first(&b->waiters) !=
		   (b->irq_wait ? &b->irq_wait->node : NULL));
}

void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* Quick check to see if this waiter was already decoupled from
	 * the tree by the bottom-half to avoid contention on the spinlock
	 * by the herd.
	 */
	if (RB_EMPTY_NODE(&wait->node)) {
		GEM_BUG_ON(READ_ONCE(b->irq_wait) == wait);
		return;
	}

	spin_lock_irq(&b->rb_lock);
	__intel_engine_remove_wait(engine, wait);
	spin_unlock_irq(&b->rb_lock);
}

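/*
 * The signaler is a per-engine kthread, woken either by the interrupt
 * bottom-half or by a client adding a new signal. It performs the coherent
 * seqno check and calls dma_fence_signal() for every completed request on
 * its list. Run it with RT priority to keep fence-signaling latency low.
 */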
static void signaler_set_rtpriority(void)
{
	struct sched_param param = { .sched_priority = 1 };

	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
}

static int intel_breadcrumbs_signaler(void *arg)
{
	struct intel_engine_cs *engine = arg;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct i915_request *rq, *n;

	/* Install ourselves with high priority to reduce signalling latency */
	signaler_set_rtpriority();

	do {
		bool do_schedule = true;
		LIST_HEAD(list);
		u32 seqno;

		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&b->signals))
			goto sleep;

		/*
		 * We are either woken up by the interrupt bottom-half,
		 * or by a client adding a new signaller. In both cases,
		 * the GPU seqno may have advanced beyond our oldest signal.
		 * If it has, propagate the signal, remove the waiter and
		 * check again with the next oldest signal. Otherwise we
		 * need to wait for a new interrupt from the GPU or for
		 * a new client.
		 */
		seqno = intel_engine_get_seqno(engine);

		spin_lock_irq(&b->rb_lock);
		list_for_each_entry_safe(rq, n, &b->signals, signaling.link) {
			u32 this = rq->signaling.wait.seqno;

			GEM_BUG_ON(!rq->signaling.wait.seqno);

			if (!i915_seqno_passed(seqno, this))
				break;

			if (likely(this == i915_request_global_seqno(rq))) {
				__intel_engine_remove_wait(engine,
							   &rq->signaling.wait);

				rq->signaling.wait.seqno = 0;
				__list_del_entry(&rq->signaling.link);

				if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
					      &rq->fence.flags)) {
					list_add_tail(&rq->signaling.link,
						      &list);
					i915_request_get(rq);
				}
			}
		}
		spin_unlock_irq(&b->rb_lock);

		if (!list_empty(&list)) {
			local_bh_disable();
			list_for_each_entry_safe(rq, n, &list, signaling.link) {
				dma_fence_signal(&rq->fence);
				GEM_BUG_ON(!i915_request_completed(rq));
				i915_request_put(rq);
			}
			local_bh_enable(); /* kick start the tasklets */

			/*
			 * If the engine is saturated we may be continually
			 * processing completed requests. This angers the
			 * NMI watchdog if we never let anything else
			 * have access to the CPU. Let's pretend to be nice
			 * and relinquish the CPU if we burn through the
			 * entire RT timeslice!
			 */
			do_schedule = need_resched();
		}

		if (unlikely(do_schedule)) {
sleep:
			if (kthread_should_park())
				kthread_parkme();

			if (unlikely(kthread_should_stop()))
				break;

			schedule();
		}
	} while (1);
	__set_current_state(TASK_RUNNING);

	return 0;
}

static void insert_signal(struct intel_breadcrumbs *b,
			  struct i915_request *request,
			  const u32 seqno)
{
	struct i915_request *iter;

	lockdep_assert_held(&b->rb_lock);

	/*
	 * A reasonable assumption is that we are called to add signals
	 * in sequence, as the requests are submitted for execution and
	 * assigned a global_seqno. This will be the case for the majority
	 * of internally generated signals (inter-engine signaling).
	 *
	 * Out of order waiters triggering random signaling enabling will
	 * be more problematic, but hopefully rare enough and the list
	 * small enough that the O(N) insertion sort is not an issue.
	 */

	list_for_each_entry_reverse(iter, &b->signals, signaling.link)
		if (i915_seqno_passed(seqno, iter->signaling.wait.seqno))
			break;

	list_add(&request->signaling.link, &iter->signaling.link);
}

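/*
 * Arrange for @request's fence to be signalled from the signaler thread once
 * its global seqno has been reached. Returns false only if the request was
 * found to be already complete, so no signaling needed to be enabled.
 */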
bool intel_engine_enable_signaling(struct i915_request *request, bool wakeup)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct intel_wait *wait = &request->signaling.wait;
	u32 seqno;

	/*
	 * Note that we may be called from an interrupt handler on another
	 * device (e.g. nouveau signaling a fence completion causing us
	 * to submit a request, and so enable signaling). As such,
	 * we need to make sure that all other users of b->rb_lock protect
	 * against interrupts, i.e. use spin_lock_irqsave.
	 */

	/* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */
	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&request->lock);

	seqno = i915_request_global_seqno(request);
	if (!seqno) /* will be enabled later upon execution */
		return true;

	GEM_BUG_ON(wait->seqno);
	wait->tsk = b->signaler;
	wait->request = request;
	wait->seqno = seqno;

	/*
	 * Add ourselves into the list of waiters, registering our
	 * bottom-half as the signaller thread. As per usual, only the oldest
	 * waiter (not just signaller) is tasked as the bottom-half waking
	 * up all completed waiters after the user interrupt.
	 *
	 * If we are the oldest waiter, enable the irq (after which we
	 * must double check that the seqno did not complete).
	 */
	spin_lock(&b->rb_lock);
	insert_signal(b, request, seqno);
	wakeup &= __intel_engine_add_wait(engine, wait);
	spin_unlock(&b->rb_lock);

	if (wakeup) {
		wake_up_process(b->signaler);
		return !intel_wait_complete(wait);
	}

	return true;
}

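/*
 * Undo intel_engine_enable_signaling(): remove the request from the waiter
 * tree and from the signal list. Called with the request lock held and
 * interrupts disabled.
 */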
void intel_engine_cancel_signaling(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&request->lock);

	if (!READ_ONCE(request->signaling.wait.seqno))
		return;

	spin_lock(&b->rb_lock);
	__intel_engine_remove_wait(engine, &request->signaling.wait);
	if (fetch_and_zero(&request->signaling.wait.seqno))
		__list_del_entry(&request->signaling.link);
	spin_unlock(&b->rb_lock);
}

int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct task_struct *tsk;

	spin_lock_init(&b->rb_lock);
	spin_lock_init(&b->irq_lock);

	timer_setup(&b->fake_irq, intel_breadcrumbs_fake_irq, 0);
	timer_setup(&b->hangcheck, intel_breadcrumbs_hangcheck, 0);

	INIT_LIST_HEAD(&b->signals);

	/* Spawn a thread to provide a common bottom-half for all signals.
	 * As this is an asynchronous interface we cannot steal the current
	 * task for handling the bottom-half to the user interrupt, therefore
	 * we create a thread to do the coherent seqno dance after the
	 * interrupt and then signal the waitqueue (via the dma-buf/fence).
	 */
	tsk = kthread_run(intel_breadcrumbs_signaler, engine,
			  "i915/signal:%d", engine->id);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);

	b->signaler = tsk;

	return 0;
}

static void cancel_fake_irq(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	del_timer_sync(&b->fake_irq); /* may queue b->hangcheck */
	del_timer_sync(&b->hangcheck);
	clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	unsigned long flags;

	spin_lock_irqsave(&b->irq_lock, flags);

	/*
	 * Leave the fake_irq timer enabled (if it is running), but clear the
	 * bit so that it turns itself off on its next wake up and goes back
	 * to the long hangcheck interval if still required.
	 */
	clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);

	if (b->irq_enabled)
		irq_enable(engine);
	else
		irq_disable(engine);

	spin_unlock_irqrestore(&b->irq_lock, flags);
}

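/*
 * Final teardown for the engine: stop the signaler kthread and cancel the
 * fake-irq/hangcheck timers. The engine must be idle with no waiters left.
 */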
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* The engines should be idle and all requests accounted for! */
	WARN_ON(READ_ONCE(b->irq_wait));
	WARN_ON(!RB_EMPTY_ROOT(&b->waiters));
	WARN_ON(!list_empty(&b->signals));

	if (!IS_ERR_OR_NULL(b->signaler))
		kthread_stop(b->signaler);

	cancel_fake_irq(engine);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_breadcrumbs.c"
#endif