/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2018 Intel Corporation
 */

#include <linux/sched/mm.h>
#include <linux/stop_machine.h>

#include "display/intel_display_types.h"
#include "display/intel_overlay.h"

#include "gem/i915_gem_context.h"

#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "i915_irq.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_reset.h"

#include "uc/intel_guc.h"

#define RESET_MAX_RETRIES 3

/* XXX How to handle concurrent GGTT updates using tiling registers? */
#define RESET_UNDER_STOP_MACHINE 0

static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
	intel_uncore_rmw_fw(uncore, reg, 0, set);
}

static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
	intel_uncore_rmw_fw(uncore, reg, clr, 0);
}

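/*
 * Cancel the remaining in-flight requests on this engine that belong to
 * the same context as the hung request, marking them to be skipped with a
 * -EIO fence error.
 */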
static void engine_skip_context(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct i915_gem_context *hung_ctx = rq->gem_context;

	if (!i915_request_is_active(rq))
		return;

	lockdep_assert_held(&engine->active.lock);
	list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
		if (rq->gem_context == hung_ctx)
			i915_request_skip(rq, -EIO);
}

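/*
 * Accumulate a ban score against the client that owned the guilty context;
 * banned contexts and hangs in quick succession both add to the score.
 */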
static void client_mark_guilty(struct drm_i915_file_private *file_priv,
			       const struct i915_gem_context *ctx)
{
	unsigned int score;
	unsigned long prev_hang;

	if (i915_gem_context_is_banned(ctx))
		score = I915_CLIENT_SCORE_CONTEXT_BAN;
	else
		score = 0;

	prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
	if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
		score += I915_CLIENT_SCORE_HANG_FAST;

	if (score) {
		atomic_add(score, &file_priv->ban_score);

		DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
				 ctx->name, score,
				 atomic_read(&file_priv->ban_score));
	}
}

static bool context_mark_guilty(struct i915_gem_context *ctx)
{
	unsigned long prev_hang;
	bool banned;
	int i;

	atomic_inc(&ctx->guilty_count);

	/* Cool contexts are too cool to be banned! (Used for reset testing.) */
	if (!i915_gem_context_is_bannable(ctx))
		return false;

	dev_notice(ctx->i915->drm.dev,
		   "%s context reset due to GPU hang\n",
		   ctx->name);

	/* Record the timestamp for the last N hangs */
	prev_hang = ctx->hang_timestamp[0];
	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++)
		ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
	ctx->hang_timestamp[i] = jiffies;

	/* If we have hung N+1 times in rapid succession, we ban the context! */
	banned = !i915_gem_context_is_recoverable(ctx);
	if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
		banned = true;
	if (banned) {
		DRM_DEBUG_DRIVER("context %s: guilty %d, banned\n",
				 ctx->name, atomic_read(&ctx->guilty_count));
		i915_gem_context_set_banned(ctx);
	}

	if (!IS_ERR_OR_NULL(ctx->file_priv))
		client_mark_guilty(ctx->file_priv, ctx);

	return banned;
}

static void context_mark_innocent(struct i915_gem_context *ctx)
{
	atomic_inc(&ctx->active_count);
}

void __i915_request_reset(struct i915_request *rq, bool guilty)
{
	GEM_TRACE("%s rq=%llx:%lld, guilty? %s\n",
		  rq->engine->name,
		  rq->fence.context,
		  rq->fence.seqno,
		  yesno(guilty));

	GEM_BUG_ON(i915_request_completed(rq));

	if (guilty) {
		i915_request_skip(rq, -EIO);
		if (context_mark_guilty(rq->gem_context))
			engine_skip_context(rq);
	} else {
		dma_fence_set_error(&rq->fence, -EAGAIN);
		context_mark_innocent(rq->gem_context);
	}
}

static bool i915_in_reset(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return gdrst & GRDOM_RESET_STATUS;
}

static int i915_do_reset(struct intel_gt *gt,
			 intel_engine_mask_t engine_mask,
			 unsigned int retry)
{
	struct pci_dev *pdev = gt->i915->drm.pdev;
	int err;

	/* Assert reset for at least 20 usec, and wait for acknowledgement. */
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(50);
	err = wait_for_atomic(i915_in_reset(pdev), 50);

	/* Clear the reset request. */
	pci_write_config_byte(pdev, I915_GDRST, 0);
	udelay(50);
	if (!err)
		err = wait_for_atomic(!i915_in_reset(pdev), 50);

	return err;
}

static bool g4x_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct intel_gt *gt,
			intel_engine_mask_t engine_mask,
			unsigned int retry)
{
	struct pci_dev *pdev = gt->i915->drm.pdev;

	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for_atomic(g4x_reset_complete(pdev), 50);
}

static int g4x_do_reset(struct intel_gt *gt,
			intel_engine_mask_t engine_mask,
			unsigned int retry)
{
	struct pci_dev *pdev = gt->i915->drm.pdev;
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	rmw_set_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for media reset failed\n");
		goto out;
	}

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for render reset failed\n");
		goto out;
	}

out:
	pci_write_config_byte(pdev, I915_GDRST, 0);

	rmw_clear_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);

	return ret;
}

static int ironlake_do_reset(struct intel_gt *gt,
			     intel_engine_mask_t engine_mask,
			     unsigned int retry)
{
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	intel_uncore_write_fw(uncore, ILK_GDSR,
			      ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
					   ILK_GRDOM_RESET_ENABLE, 0,
					   5000, 0,
					   NULL);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for render reset failed\n");
		goto out;
	}

	intel_uncore_write_fw(uncore, ILK_GDSR,
			      ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
					   ILK_GRDOM_RESET_ENABLE, 0,
					   5000, 0,
					   NULL);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for media reset failed\n");
		goto out;
	}

out:
	intel_uncore_write_fw(uncore, ILK_GDSR, 0);
	intel_uncore_posting_read_fw(uncore, ILK_GDSR);
	return ret;
}

/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
{
	struct intel_uncore *uncore = gt->uncore;
	int err;

	/*
	 * GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);

	/* Wait for the device to ack the reset requests */
	err = __intel_wait_for_register_fw(uncore,
					   GEN6_GDRST, hw_domain_mask, 0,
					   500, 0,
					   NULL);
	if (err)
		DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
				 hw_domain_mask);

	return err;
}

static int gen6_reset_engines(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask,
			      unsigned int retry)
{
	static const u32 hw_engine_mask[] = {
		[RCS0]  = GEN6_GRDOM_RENDER,
		[BCS0]  = GEN6_GRDOM_BLT,
		[VCS0]  = GEN6_GRDOM_MEDIA,
		[VCS1]  = GEN8_GRDOM_MEDIA2,
		[VECS0] = GEN6_GRDOM_VECS,
	};
	struct intel_engine_cs *engine;
	u32 hw_mask;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN6_GRDOM_FULL;
	} else {
		intel_engine_mask_t tmp;

		hw_mask = 0;
		for_each_engine_masked(engine, gt, engine_mask, tmp) {
			GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
			hw_mask |= hw_engine_mask[engine->id];
		}
	}

	return gen6_hw_domain_reset(gt, hw_mask);
}

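/*
 * If the engine has an SFC (Scaler & Format Converter) in use, force-lock
 * it and add its reset domain to @hw_mask so that the shared unit is reset
 * together with the engine.
 */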
static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
{
	struct intel_uncore *uncore = engine->uncore;
	u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
	i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
	u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
	i915_reg_t sfc_usage;
	u32 sfc_usage_bit;
	u32 sfc_reset_bit;
	int ret;

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
			return 0;

		sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;

		sfc_forced_lock_ack = GEN11_VCS_SFC_LOCK_STATUS(engine);
		sfc_forced_lock_ack_bit  = GEN11_VCS_SFC_LOCK_ACK_BIT;

		sfc_usage = GEN11_VCS_SFC_LOCK_STATUS(engine);
		sfc_usage_bit = GEN11_VCS_SFC_USAGE_BIT;
		sfc_reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
		break;

	case VIDEO_ENHANCEMENT_CLASS:
		sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;

		sfc_forced_lock_ack = GEN11_VECS_SFC_LOCK_ACK(engine);
		sfc_forced_lock_ack_bit  = GEN11_VECS_SFC_LOCK_ACK_BIT;

		sfc_usage = GEN11_VECS_SFC_USAGE(engine);
		sfc_usage_bit = GEN11_VECS_SFC_USAGE_BIT;
		sfc_reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
		break;

	default:
		return 0;
	}

	/*
	 * If the engine is using an SFC, tell the engine that a software reset
	 * is going to happen. The engine will then try to force lock the SFC.
	 * If the SFC ends up being locked to the engine we want to reset, we
	 * have to reset it as well (we will unlock it once the reset sequence
	 * is completed).
	 */
	if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit))
		return 0;

	rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);

	ret = __intel_wait_for_register_fw(uncore,
					   sfc_forced_lock_ack,
					   sfc_forced_lock_ack_bit,
					   sfc_forced_lock_ack_bit,
					   1000, 0, NULL);

	/* Was the SFC released while we were trying to lock it? */
	if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit))
		return 0;

	if (ret) {
		DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
		return ret;
	}

	*hw_mask |= sfc_reset_bit;
	return 0;
}

static void gen11_unlock_sfc(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
	i915_reg_t sfc_forced_lock;
	u32 sfc_forced_lock_bit;

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
			return;

		sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
		break;

	case VIDEO_ENHANCEMENT_CLASS:
		sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
		break;

	default:
		return;
	}

	rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
}

static int gen11_reset_engines(struct intel_gt *gt,
			       intel_engine_mask_t engine_mask,
			       unsigned int retry)
{
	static const u32 hw_engine_mask[] = {
		[RCS0]  = GEN11_GRDOM_RENDER,
		[BCS0]  = GEN11_GRDOM_BLT,
		[VCS0]  = GEN11_GRDOM_MEDIA,
		[VCS1]  = GEN11_GRDOM_MEDIA2,
		[VCS2]  = GEN11_GRDOM_MEDIA3,
		[VCS3]  = GEN11_GRDOM_MEDIA4,
		[VECS0] = GEN11_GRDOM_VECS,
		[VECS1] = GEN11_GRDOM_VECS2,
	};
	struct intel_engine_cs *engine;
	intel_engine_mask_t tmp;
	u32 hw_mask;
	int ret;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN11_GRDOM_FULL;
	} else {
		hw_mask = 0;
		for_each_engine_masked(engine, gt, engine_mask, tmp) {
			GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
			hw_mask |= hw_engine_mask[engine->id];
			ret = gen11_lock_sfc(engine, &hw_mask);
			if (ret)
				goto sfc_unlock;
		}
	}

	ret = gen6_hw_domain_reset(gt, hw_mask);

sfc_unlock:
	/*
	 * We unlock the SFC based on the lock status and not the result of
	 * gen11_lock_sfc to make sure that we clean properly if something
	 * wrong happened during the lock (e.g. lock acquired after timeout
	 * expiration).
	 */
	if (engine_mask != ALL_ENGINES)
		for_each_engine_masked(engine, gt, engine_mask, tmp)
			gen11_unlock_sfc(engine);

	return ret;
}

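/*
 * Request the ready-for-reset handshake via RING_RESET_CTL and wait for the
 * engine to acknowledge; catastrophic errors bypass the handshake as they
 * must be cleared by the hardware itself (HAS#396813).
 */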
static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base);
	u32 request, mask, ack;
	int ret;

	ack = intel_uncore_read_fw(uncore, reg);
	if (ack & RESET_CTL_CAT_ERROR) {
		/*
		 * For catastrophic errors, ready-for-reset sequence
		 * needs to be bypassed: HAS#396813
		 */
		request = RESET_CTL_CAT_ERROR;
		mask = RESET_CTL_CAT_ERROR;

		/* Catastrophic errors need to be cleared by HW */
		ack = 0;
	} else if (!(ack & RESET_CTL_READY_TO_RESET)) {
		request = RESET_CTL_REQUEST_RESET;
		mask = RESET_CTL_READY_TO_RESET;
		ack = RESET_CTL_READY_TO_RESET;
	} else {
		return 0;
	}

	intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request));
	ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
					   700, 0, NULL);
	if (ret)
		DRM_ERROR("%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
			  engine->name, request,
			  intel_uncore_read_fw(uncore, reg));

	return ret;
}

static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
{
	intel_uncore_write_fw(engine->uncore,
			      RING_RESET_CTL(engine->mmio_base),
			      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}

static int gen8_reset_engines(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask,
			      unsigned int retry)
{
	struct intel_engine_cs *engine;
	const bool reset_non_ready = retry >= 1;
	intel_engine_mask_t tmp;
	int ret;

	for_each_engine_masked(engine, gt, engine_mask, tmp) {
		ret = gen8_engine_reset_prepare(engine);
		if (ret && !reset_non_ready)
			goto skip_reset;

		/*
		 * If this is not the first failed attempt to prepare,
		 * we decide to proceed anyway.
		 *
		 * By doing so we risk context corruption and with
		 * some gens (kbl), possible system hang if reset
		 * happens during active bb execution.
		 *
		 * We would rather take context corruption than a failed
		 * reset with a wedged driver/GPU. The active bb execution
		 * case should be covered by the stop_engines() we have
		 * before the reset.
		 */
	}

	if (INTEL_GEN(gt->i915) >= 11)
		ret = gen11_reset_engines(gt, engine_mask, retry);
	else
		ret = gen6_reset_engines(gt, engine_mask, retry);
541 542

skip_reset:
	for_each_engine_masked(engine, gt, engine_mask, tmp)
		gen8_engine_reset_cancel(engine);

	return ret;
}

static int mock_reset(struct intel_gt *gt,
		      intel_engine_mask_t mask,
		      unsigned int retry)
{
	return 0;
}

typedef int (*reset_func)(struct intel_gt *,
			  intel_engine_mask_t engine_mask,
			  unsigned int retry);

static reset_func intel_get_gpu_reset(const struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	if (is_mock_gt(gt))
		return mock_reset;
	else if (INTEL_GEN(i915) >= 8)
		return gen8_reset_engines;
	else if (INTEL_GEN(i915) >= 6)
		return gen6_reset_engines;
	else if (INTEL_GEN(i915) >= 5)
		return ironlake_do_reset;
	else if (IS_G4X(i915))
		return g4x_do_reset;
	else if (IS_G33(i915) || IS_PINEVIEW(i915))
		return g33_do_reset;
	else if (INTEL_GEN(i915) >= 3)
		return i915_do_reset;
	else
		return NULL;
}

int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
{
	const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
	reset_func reset;
	int ret = -ETIMEDOUT;
	int retry;

	reset = intel_get_gpu_reset(gt);
	if (!reset)
		return -ENODEV;

	/*
	 * If the power well sleeps during the reset, the reset
	 * request may be dropped and never completes (causing -EIO).
	 */
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
		GEM_TRACE("engine_mask=%x\n", engine_mask);
		preempt_disable();
		ret = reset(gt, engine_mask, retry);
		preempt_enable();
	}
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);

	return ret;
}

bool intel_has_gpu_reset(const struct intel_gt *gt)
{
	if (!i915_modparams.reset)
		return false;

	return intel_get_gpu_reset(gt);
}

bool intel_has_reset_engine(const struct intel_gt *gt)
{
	if (i915_modparams.reset < 2)
		return false;

	return INTEL_INFO(gt->i915)->has_reset_engine;
}

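/* Reset only the GuC, using its dedicated GDRST domain. */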
int intel_reset_guc(struct intel_gt *gt)
{
	u32 guc_domain =
		INTEL_GEN(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
	int ret;

	GEM_BUG_ON(!HAS_GT_UC(gt->i915));

	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	ret = gen6_hw_domain_reset(gt, guc_domain);
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);

	return ret;
}

/*
 * Ensure the irq handler finishes, and is not run again.
 */
static void reset_prepare_engine(struct intel_engine_cs *engine)
{
	/*
	 * During the reset sequence, we must prevent the engine from
	 * entering RC6. As the context state is undefined until we restart
	 * the engine, if it does enter RC6 during the reset, the state
	 * written to the powercontext is undefined and so we may lose
	 * GPU state upon resume, i.e. fail to restart after a reset.
	 */
	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
	engine->reset.prepare(engine);
}

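/*
 * Zap userspace GGTT mmaps that depend on fence registers, forcing fresh
 * pagefaults (and fence reacquisition) after the reset.
 */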
static void revoke_mmaps(struct intel_gt *gt)
{
	int i;

	for (i = 0; i < gt->ggtt->num_fences; i++) {
		struct drm_vma_offset_node *node;
		struct i915_vma *vma;
		u64 vma_offset;

		vma = READ_ONCE(gt->ggtt->fence_regs[i].vma);
		if (!vma)
			continue;

		if (!i915_vma_has_userfault(vma))
			continue;

		GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]);
		node = &vma->obj->base.vma_node;
		vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
		unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
				    drm_vma_node_offset_addr(node) + vma_offset,
				    vma->size,
				    1);
	}
}

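/*
 * Stop submission on all engines, recording which engines were awake so
 * that the matching power references can be released in reset_finish().
 */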
static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t awake = 0;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id) {
		if (intel_engine_pm_get_if_awake(engine))
			awake |= engine->mask;
		reset_prepare_engine(engine);
	}

	intel_uc_reset_prepare(&gt->uc);

	return awake;
}

static void gt_revoke(struct intel_gt *gt)
{
	revoke_mmaps(gt);
}

static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.
	 */
	err = i915_ggtt_enable_hw(gt->i915);
	if (err)
		return err;

	for_each_engine(engine, gt, id)
		__intel_engine_reset(engine, stalled_mask & engine->mask);

	i915_gem_restore_fences(gt->ggtt);

	return err;
}

static void reset_finish_engine(struct intel_engine_cs *engine)
{
	engine->reset.finish(engine);
	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);

	intel_engine_breadcrumbs_irq(engine);
}

static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id) {
		reset_finish_engine(engine);
		if (awake & engine->mask)
			intel_engine_pm_put(engine);
	}
}

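/*
 * Replacement for engine->submit_request once wedged: each request is
 * immediately completed with a -EIO fence error so that waiters are
 * released.
 */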
static void nop_submit_request(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	GEM_TRACE("%s fence %llx:%lld -> -EIO\n",
		  engine->name, request->fence.context, request->fence.seqno);
	dma_fence_set_error(&request->fence, -EIO);

	spin_lock_irqsave(&engine->active.lock, flags);
	__i915_request_submit(request);
	i915_request_mark_complete(request);
	spin_unlock_irqrestore(&engine->active.lock, flags);

	intel_engine_queue_breadcrumbs(engine);
}

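/*
 * Declare the GPU wedged: stop submitting to the hardware and complete all
 * in-flight requests with -EIO. Recovery is only attempted from
 * __intel_gt_unset_wedged().
 */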
static void __intel_gt_set_wedged(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t awake;
	enum intel_engine_id id;

	if (test_bit(I915_WEDGED, &gt->reset.flags))
		return;

	if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(gt)) {
		struct drm_printer p = drm_debug_printer(__func__);

		for_each_engine(engine, gt, id)
			intel_engine_dump(engine, &p, "%s\n", engine->name);
	}

	GEM_TRACE("start\n");

	/*
	 * First, stop submission to hw, but do not yet complete requests by
	 * rolling the global seqno forward (since this would complete requests
	 * for which we haven't set the fence error to EIO yet).
	 */
	awake = reset_prepare(gt);

	/* Even if the GPU reset fails, it should still stop the engines */
	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		__intel_gt_reset(gt, ALL_ENGINES);

	for_each_engine(engine, gt, id)
		engine->submit_request = nop_submit_request;

	/*
	 * Make sure no request can slip through without getting completed,
	 * either by the engine->cancel_requests() call below or by
	 * nop_submit_request itself.
	 */
	synchronize_rcu_expedited();
	set_bit(I915_WEDGED, &gt->reset.flags);

	/* Mark all executing requests as skipped */
	for_each_engine(engine, gt, id)
		engine->cancel_requests(engine);

	reset_finish(gt, awake);

	GEM_TRACE("end\n");
}

void intel_gt_set_wedged(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;

	mutex_lock(&gt->reset.mutex);
	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		__intel_gt_set_wedged(gt);
	mutex_unlock(&gt->reset.mutex);
}

static bool __intel_gt_unset_wedged(struct intel_gt *gt)
{
	struct intel_gt_timelines *timelines = &gt->timelines;
	struct intel_timeline *tl;
	unsigned long flags;
	bool ok;

	if (!test_bit(I915_WEDGED, &gt->reset.flags))
		return true;

	/* Never fully initialised, recovery impossible */
	if (test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags))
		return false;

	GEM_TRACE("start\n");

	/*
	 * Before unwedging, make sure that all pending operations
	 * are flushed and errored out - we may have requests waiting upon
	 * third party fences. We marked all inflight requests as -EIO, and
	 * every execbuf since then has returned -EIO; for consistency we want
	 * all the currently pending requests to also be marked as -EIO, which
	 * is done inside our nop_submit_request - and so we must wait.
	 *
	 * No more can be submitted until we reset the wedged bit.
	 */
	spin_lock_irqsave(&timelines->lock, flags);
	list_for_each_entry(tl, &timelines->active_list, link) {
		struct dma_fence *fence;

		fence = i915_active_fence_get(&tl->last_request);
		if (!fence)
			continue;

		spin_unlock_irqrestore(&timelines->lock, flags);

		/*
		 * All internal dependencies (i915_requests) will have
		 * been flushed by the set-wedge, but we may be stuck waiting
		 * for external fences. These should all be capped to 10s
		 * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
		 * in the worst case.
		 */
		dma_fence_default_wait(fence, false, MAX_SCHEDULE_TIMEOUT);
		dma_fence_put(fence);

		/* Restart iteration after dropping lock */
		spin_lock_irqsave(&timelines->lock, flags);
		tl = list_entry(&timelines->active_list, typeof(*tl), link);
	}
	spin_unlock_irqrestore(&timelines->lock, flags);

	/* We must reset pending GPU events before restoring our submission */
	ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */
	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		ok = __intel_gt_reset(gt, ALL_ENGINES) == 0;
	if (!ok) {
		/*
		 * Warn CI about the unrecoverable wedged condition.
		 * Time for a reboot.
		 */
		add_taint_for_CI(TAINT_WARN);
		return false;
	}

	/*
	 * Undo nop_submit_request. We prevent all new i915 requests from
	 * being queued (by disallowing execbuf whilst wedged) so having
	 * waited for all active requests above, we know the system is idle
	 * and do not have to worry about a thread being inside
	 * engine->submit_request() as we swap over. So unlike installing
	 * the nop_submit_request on reset, we can do this from normal
	 * context and do not require stop_machine().
	 */
	intel_engines_reset_default_submission(gt);

	GEM_TRACE("end\n");

	smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
	clear_bit(I915_WEDGED, &gt->reset.flags);

	return true;
}

bool intel_gt_unset_wedged(struct intel_gt *gt)
{
	bool result;

	mutex_lock(&gt->reset.mutex);
	result = __intel_gt_unset_wedged(gt);
	mutex_unlock(&gt->reset.mutex);

	return result;
}

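/*
 * Revoke userspace mmaps, then retry the full chip reset a few times with
 * a growing back-off before restoring the GGTT and per-engine state.
 */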
static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
	int err, i;

	gt_revoke(gt);

	err = __intel_gt_reset(gt, ALL_ENGINES);
	for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
		msleep(10 * (i + 1));
		err = __intel_gt_reset(gt, ALL_ENGINES);
	}
	if (err)
		return err;

	return gt_reset(gt, stalled_mask);
}

static int resume(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret;

	for_each_engine(engine, gt, id) {
		ret = engine->resume(engine);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * intel_gt_reset - reset chip after a hang
 * @gt: #intel_gt to reset
 * @stalled_mask: mask of the stalled engines with the guilty requests
 * @reason: user error message for why we are resetting
 *
 * Reset the chip.  Useful if a hang is detected. Marks the device as wedged
 * on failure.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
void intel_gt_reset(struct intel_gt *gt,
		    intel_engine_mask_t stalled_mask,
		    const char *reason)
{
	intel_engine_mask_t awake;
	int ret;

	GEM_TRACE("flags=%lx\n", gt->reset.flags);

	might_sleep();
	GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
	mutex_lock(&gt->reset.mutex);

	/* Clear any previous failed attempts at recovery. Time to try again. */
	if (!__intel_gt_unset_wedged(gt))
		goto unlock;

	if (reason)
		dev_notice(gt->i915->drm.dev,
			   "Resetting chip for %s\n", reason);
	atomic_inc(&gt->i915->gpu_error.reset_count);

	awake = reset_prepare(gt);

	if (!intel_has_gpu_reset(gt)) {
		if (i915_modparams.reset)
			dev_err(gt->i915->drm.dev, "GPU reset not supported\n");
		else
			DRM_DEBUG_DRIVER("GPU reset disabled\n");
		goto error;
	}

	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		intel_runtime_pm_disable_interrupts(gt->i915);

	if (do_reset(gt, stalled_mask)) {
		dev_err(gt->i915->drm.dev, "Failed to reset chip\n");
		goto taint;
	}

	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		intel_runtime_pm_enable_interrupts(gt->i915);

	intel_overlay_reset(gt->i915);

	/*
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	ret = intel_gt_init_hw(gt);
	if (ret) {
		DRM_ERROR("Failed to initialise HW following reset (%d)\n",
			  ret);
		goto taint;
	}

	ret = resume(gt);
	if (ret)
		goto taint;

finish:
	reset_finish(gt, awake);
unlock:
	mutex_unlock(&gt->reset.mutex);
	return;

taint:
	/*
	 * History tells us that if we cannot reset the GPU now, we
	 * never will. This then impacts everything that is run
	 * subsequently. On failing the reset, we mark the driver
	 * as wedged, preventing further execution on the GPU.
	 * We also want to go one step further and add a taint to the
	 * kernel so that any subsequent faults can be traced back to
	 * this failure. This is important for CI, where if the
	 * GPU/driver fails we would like to reboot and restart testing
	 * rather than continue on into oblivion. For everyone else,
	 * the system should still plod along, but they have been warned!
	 */
	add_taint_for_CI(TAINT_WARN);
error:
	__intel_gt_set_wedged(gt);
	goto finish;
}

static inline int intel_gt_reset_engine(struct intel_engine_cs *engine)
{
	return __intel_gt_reset(engine->gt, engine->mask);
}

/**
 * intel_engine_reset - reset GPU engine to recover from a hang
 * @engine: engine to reset
 * @msg: reason for GPU reset; or NULL for no dev_notice()
 *
 * Reset a specific GPU engine. Useful if a hang is detected.
 * Returns zero on successful reset or otherwise an error code.
 *
 * Procedure is:
 *  - identifies the request that caused the hang and it is dropped
 *  - reset engine (which will force the engine to idle)
 *  - re-init/configure engine
 */
int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
{
	struct intel_gt *gt = engine->gt;
	int ret;

	GEM_TRACE("%s flags=%lx\n", engine->name, gt->reset.flags);
	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags));

	if (!intel_engine_pm_get_if_awake(engine))
		return 0;

	reset_prepare_engine(engine);

	if (msg)
		dev_notice(engine->i915->drm.dev,
			   "Resetting %s for %s\n", engine->name, msg);
	atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);

	if (!engine->gt->uc.guc.execbuf_client)
		ret = intel_gt_reset_engine(engine);
	else
		ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine);
	if (ret) {
		/* If we fail here, we expect to fall back to a global reset */
		DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
				 engine->gt->uc.guc.execbuf_client ? "GuC " : "",
				 engine->name, ret);
		goto out;
	}

	/*
	 * The request that caused the hang is stuck on elsp; we know the
	 * active request and can drop it, adjusting the head to skip the
	 * offending request and resume executing the remaining requests in
	 * the queue.
	 */
	__intel_engine_reset(engine, true);

	/*
	 * The engine and its registers (and workarounds in case of render)
	 * have been reset to their default values. Follow the init_ring
	 * process to program RING_MODE, HWSP and re-enable submission.
	 */
	ret = engine->resume(engine);

out:
	intel_engine_cancel_stop_cs(engine);
	reset_finish_engine(engine);
	intel_engine_pm_put(engine);
	return ret;
}

static void intel_gt_reset_global(struct intel_gt *gt,
				  u32 engine_mask,
				  const char *reason)
{
	struct kobject *kobj = &gt->i915->drm.primary->kdev->kobj;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	struct intel_wedge_me w;

	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);

	DRM_DEBUG_DRIVER("resetting chip\n");
	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);

	/* Use a watchdog to ensure that our reset completes */
	intel_wedge_on_timeout(&w, gt, 5 * HZ) {
		intel_prepare_reset(gt->i915);

		/* Flush everyone using a resource about to be clobbered */
		synchronize_srcu_expedited(&gt->reset.backoff_srcu);

		intel_gt_reset(gt, engine_mask, reason);

		intel_finish_reset(gt->i915);
	}

	if (!test_bit(I915_WEDGED, &gt->reset.flags))
		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
}

/**
 * intel_gt_handle_error - handle a gpu error
 * @gt: the intel_gt
 * @engine_mask: mask representing engines that are hung
 * @flags: control flags
 * @fmt: Error message format string
 *
 * Do some basic checking of register state at error time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void intel_gt_handle_error(struct intel_gt *gt,
			   intel_engine_mask_t engine_mask,
			   unsigned long flags,
			   const char *fmt, ...)
{
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	intel_engine_mask_t tmp;
	char error_msg[80];
	char *msg = NULL;

	if (fmt) {
		va_list args;

		va_start(args, fmt);
		vscnprintf(error_msg, sizeof(error_msg), fmt, args);
		va_end(args);

		msg = error_msg;
	}

	/*
	 * In most cases it's guaranteed that we get here with an RPM
	 * reference held, for example because there is a pending GPU
	 * request that won't finish until the reset is done. This
	 * isn't the case at least when we get here by doing a
	 * simulated reset via debugfs, so get an RPM reference.
	 */
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	engine_mask &= INTEL_INFO(gt->i915)->engine_mask;

	if (flags & I915_ERROR_CAPTURE) {
		i915_capture_error_state(gt->i915, engine_mask, msg);
		intel_gt_clear_error_registers(gt, engine_mask);
	}

	/*
	 * Try engine reset when available. We fall back to full reset if
	 * single reset fails.
	 */
	if (intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
		for_each_engine_masked(engine, gt, engine_mask, tmp) {
			BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
			if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
					     &gt->reset.flags))
				continue;

			if (intel_engine_reset(engine, msg) == 0)
				engine_mask &= ~engine->mask;

			clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
					      &gt->reset.flags);
		}
	}

	if (!engine_mask)
		goto out;

	/* Full reset needs the mutex; stop any other user trying to do so. */
	if (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
		wait_event(gt->reset.queue,
			   !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
		goto out; /* piggy-back on the other reset */
	}

	/* Make sure intel_gt_reset_trylock() sees the I915_RESET_BACKOFF */
	synchronize_rcu_expedited();

	/* Prevent any other reset-engine attempt. */
	for_each_engine(engine, gt, tmp) {
		while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
					&gt->reset.flags))
			wait_on_bit(&gt->reset.flags,
				    I915_RESET_ENGINE + engine->id,
				    TASK_UNINTERRUPTIBLE);
	}

	intel_gt_reset_global(gt, engine_mask, msg);

	for_each_engine(engine, gt, tmp)
		clear_bit_unlock(I915_RESET_ENGINE + engine->id,
				 &gt->reset.flags);
	clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags);
	smp_mb__after_atomic();
	wake_up_all(&gt->reset.queue);

out:
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}

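/*
 * Take a read lock on the reset backoff SRCU, first waiting for any global
 * reset in progress to complete; pair with intel_gt_reset_unlock().
 */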
int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu)
{
	might_lock(&gt->reset.backoff_srcu);
	might_sleep();

	rcu_read_lock();
	while (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
		rcu_read_unlock();

		if (wait_event_interruptible(gt->reset.queue,
					     !test_bit(I915_RESET_BACKOFF,
						       &gt->reset.flags)))
			return -EINTR;

		rcu_read_lock();
	}
	*srcu = srcu_read_lock(&gt->reset.backoff_srcu);
	rcu_read_unlock();

	return 0;
}

void intel_gt_reset_unlock(struct intel_gt *gt, int tag)
__releases(&gt->reset.backoff_srcu)
{
	srcu_read_unlock(&gt->reset.backoff_srcu, tag);
}

int intel_gt_terminally_wedged(struct intel_gt *gt)
{
	might_sleep();

	if (!intel_gt_is_wedged(gt))
		return 0;

	/* Reset still in progress? Maybe we will recover? */
	if (!test_bit(I915_RESET_BACKOFF, &gt->reset.flags))
		return -EIO;

	if (wait_event_interruptible(gt->reset.queue,
				     !test_bit(I915_RESET_BACKOFF,
					       &gt->reset.flags)))
		return -EINTR;

	return intel_gt_is_wedged(gt) ? -EIO : 0;
}

void intel_gt_set_wedged_on_init(struct intel_gt *gt)
{
	BUILD_BUG_ON(I915_RESET_ENGINE + I915_NUM_ENGINES >
		     I915_WEDGED_ON_INIT);
	intel_gt_set_wedged(gt);
	set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
}

void intel_gt_init_reset(struct intel_gt *gt)
{
	init_waitqueue_head(&gt->reset.queue);
	mutex_init(&gt->reset.mutex);
	init_srcu_struct(&gt->reset.backoff_srcu);
}

void intel_gt_fini_reset(struct intel_gt *gt)
{
	cleanup_srcu_struct(&gt->reset.backoff_srcu);
}

static void intel_wedge_me(struct work_struct *work)
{
	struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);

	dev_err(w->gt->i915->drm.dev,
		"%s timed out, cancelling all in-flight rendering.\n",
		w->name);
	intel_gt_set_wedged(w->gt);
}

void __intel_init_wedge(struct intel_wedge_me *w,
			struct intel_gt *gt,
			long timeout,
			const char *name)
{
	w->gt = gt;
	w->name = name;

	INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me);
	schedule_delayed_work(&w->work, timeout);
}

void __intel_fini_wedge(struct intel_wedge_me *w)
{
	cancel_delayed_work_sync(&w->work);
	destroy_delayed_work_on_stack(&w->work);
	w->gt = NULL;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_reset.c"
#include "selftest_hangcheck.c"
#endif