/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2018 Intel Corporation
 */

#include <linux/sched/mm.h>
#include <linux/stop_machine.h>

#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "intel_reset.h"

#include "intel_guc.h"

#define RESET_MAX_RETRIES 3

/* XXX How to handle concurrent GGTT updates using tiling registers? */
#define RESET_UNDER_STOP_MACHINE 0

static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
	intel_uncore_rmw(uncore, reg, 0, set);
}

static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
	intel_uncore_rmw(uncore, reg, clr, 0);
}

static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
	intel_uncore_rmw_fw(uncore, reg, 0, set);
}

static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
	intel_uncore_rmw_fw(uncore, reg, clr, 0);
}

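/*
 * Having identified the request that hung the engine, skip every later
 * request submitted by the same context on this engine's timeline, as its
 * context state can no longer be trusted.
 */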
static void engine_skip_context(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct i915_gem_context *hung_ctx = rq->gem_context;

	lockdep_assert_held(&engine->timeline.lock);

	if (!i915_request_is_active(rq))
		return;

	list_for_each_entry_continue(rq, &engine->timeline.requests, link)
		if (rq->gem_context == hung_ctx)
			i915_request_skip(rq, -EIO);
}

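/*
 * Propagate a context ban onto the owning client: clients whose contexts
 * hang repeatedly in quick succession accumulate a ban score of their own.
 */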
static void client_mark_guilty(struct drm_i915_file_private *file_priv,
			       const struct i915_gem_context *ctx)
{
	unsigned int score;
	unsigned long prev_hang;

	if (i915_gem_context_is_banned(ctx))
		score = I915_CLIENT_SCORE_CONTEXT_BAN;
	else
		score = 0;

	prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
	if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
		score += I915_CLIENT_SCORE_HANG_FAST;

	if (score) {
		atomic_add(score, &file_priv->ban_score);

		DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
				 ctx->name, score,
				 atomic_read(&file_priv->ban_score));
	}
}

static bool context_mark_guilty(struct i915_gem_context *ctx)
{
	unsigned long prev_hang;
	bool banned;
	int i;

	atomic_inc(&ctx->guilty_count);

	/* Cool contexts are too cool to be banned! (Used for reset testing.) */
	if (!i915_gem_context_is_bannable(ctx))
		return false;

	/* Record the timestamp for the last N hangs */
	prev_hang = ctx->hang_timestamp[0];
	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++)
		ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
	ctx->hang_timestamp[i] = jiffies;

	/* If we have hung N+1 times in rapid succession, we ban the context! */
	banned = !i915_gem_context_is_recoverable(ctx);
	if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
		banned = true;
	if (banned) {
		DRM_DEBUG_DRIVER("context %s: guilty %d, banned\n",
				 ctx->name, atomic_read(&ctx->guilty_count));
		i915_gem_context_set_banned(ctx);
	}

	if (!IS_ERR_OR_NULL(ctx->file_priv))
		client_mark_guilty(ctx->file_priv, ctx);

	return banned;
}

static void context_mark_innocent(struct i915_gem_context *ctx)
{
	atomic_inc(&ctx->active_count);
}

void i915_reset_request(struct i915_request *rq, bool guilty)
{
	GEM_TRACE("%s rq=%llx:%lld, guilty? %s\n",
		  rq->engine->name,
		  rq->fence.context,
		  rq->fence.seqno,
		  yesno(guilty));

	lockdep_assert_held(&rq->engine->timeline.lock);
	GEM_BUG_ON(i915_request_completed(rq));

	if (guilty) {
		i915_request_skip(rq, -EIO);
		if (context_mark_guilty(rq->gem_context))
			engine_skip_context(rq);
	} else {
		dma_fence_set_error(&rq->fence, -EAGAIN);
		context_mark_innocent(rq->gem_context);
	}
}

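/*
 * Quiesce the engine before reset: stop the command streamer and clear
 * RING_HEAD/TAIL/CTL so the ring is idle and empty when reset is asserted.
 */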
static void gen3_stop_engine(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	const u32 base = engine->mmio_base;

	GEM_TRACE("%s\n", engine->name);

	if (intel_engine_stop_cs(engine))
		GEM_TRACE("%s: timed out on STOP_RING\n", engine->name);

	intel_uncore_write_fw(uncore,
			      RING_HEAD(base),
			      intel_uncore_read_fw(uncore, RING_TAIL(base)));
	intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */

	intel_uncore_write_fw(uncore, RING_HEAD(base), 0);
	intel_uncore_write_fw(uncore, RING_TAIL(base), 0);
	intel_uncore_posting_read_fw(uncore, RING_TAIL(base));

	/* The ring must be empty before it is disabled */
	intel_uncore_write_fw(uncore, RING_CTL(base), 0);

	/* Check acts as a post */
	if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
		GEM_TRACE("%s: ring head [%x] not parked\n",
			  engine->name,
			  intel_uncore_read_fw(uncore, RING_HEAD(base)));
}

static void i915_stop_engines(struct drm_i915_private *i915,
			      intel_engine_mask_t engine_mask)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t tmp;

	if (INTEL_GEN(i915) < 3)
		return;

	for_each_engine_masked(engine, i915, engine_mask, tmp)
		gen3_stop_engine(engine);
}

static bool i915_in_reset(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return gdrst & GRDOM_RESET_STATUS;
}

static int i915_do_reset(struct drm_i915_private *i915,
			 intel_engine_mask_t engine_mask,
			 unsigned int retry)
{
	struct pci_dev *pdev = i915->drm.pdev;
	int err;

	/* Assert reset for at least 20 usec, and wait for acknowledgement. */
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(50);
	err = wait_for_atomic(i915_in_reset(pdev), 50);

	/* Clear the reset request. */
	pci_write_config_byte(pdev, I915_GDRST, 0);
	udelay(50);
	if (!err)
		err = wait_for_atomic(!i915_in_reset(pdev), 50);

	return err;
}

static bool g4x_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_i915_private *i915,
			intel_engine_mask_t engine_mask,
			unsigned int retry)
{
	struct pci_dev *pdev = i915->drm.pdev;

	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for_atomic(g4x_reset_complete(pdev), 50);
}

static int g4x_do_reset(struct drm_i915_private *i915,
			intel_engine_mask_t engine_mask,
			unsigned int retry)
{
	struct pci_dev *pdev = i915->drm.pdev;
	struct intel_uncore *uncore = &i915->uncore;
	int ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	rmw_set_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for media reset failed\n");
		goto out;
	}

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for render reset failed\n");
		goto out;
	}

out:
	pci_write_config_byte(pdev, I915_GDRST, 0);

	rmw_clear_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);

	return ret;
}

static int ironlake_do_reset(struct drm_i915_private *i915,
			     intel_engine_mask_t engine_mask,
			     unsigned int retry)
{
	struct intel_uncore *uncore = &i915->uncore;
	int ret;

	intel_uncore_write_fw(uncore, ILK_GDSR,
			      ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
					   ILK_GRDOM_RESET_ENABLE, 0,
					   5000, 0,
					   NULL);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for render reset failed\n");
		goto out;
	}

	intel_uncore_write_fw(uncore, ILK_GDSR,
			      ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
					   ILK_GRDOM_RESET_ENABLE, 0,
					   5000, 0,
					   NULL);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for media reset failed\n");
		goto out;
	}

out:
	intel_uncore_write_fw(uncore, ILK_GDSR, 0);
	intel_uncore_posting_read_fw(uncore, ILK_GDSR);
	return ret;
}

/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct drm_i915_private *i915,
				u32 hw_domain_mask)
{
	struct intel_uncore *uncore = &i915->uncore;
	int err;

	/*
	 * GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);

	/* Wait for the device to ack the reset requests */
	err = __intel_wait_for_register_fw(uncore,
					   GEN6_GDRST, hw_domain_mask, 0,
					   500, 0,
					   NULL);
	if (err)
		DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
				 hw_domain_mask);

	return err;
}

static int gen6_reset_engines(struct drm_i915_private *i915,
			      intel_engine_mask_t engine_mask,
			      unsigned int retry)
{
	struct intel_engine_cs *engine;
	const u32 hw_engine_mask[] = {
		[RCS0]  = GEN6_GRDOM_RENDER,
		[BCS0]  = GEN6_GRDOM_BLT,
		[VCS0]  = GEN6_GRDOM_MEDIA,
		[VCS1]  = GEN8_GRDOM_MEDIA2,
		[VECS0] = GEN6_GRDOM_VECS,
	};
	u32 hw_mask;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN6_GRDOM_FULL;
	} else {
		intel_engine_mask_t tmp;

		hw_mask = 0;
		for_each_engine_masked(engine, i915, engine_mask, tmp) {
			GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
			hw_mask |= hw_engine_mask[engine->id];
		}
	}

	return gen6_hw_domain_reset(i915, hw_mask);
}

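/*
 * Force-lock the SFC unit shared with this engine, if any. Returns the
 * extra GDRST bit to add to the reset mask when the SFC is actually in
 * use, or 0 if there is nothing further to reset.
 */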
static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
	i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
	u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
	i915_reg_t sfc_usage;
	u32 sfc_usage_bit;
	u32 sfc_reset_bit;

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
			return 0;

		sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;

		sfc_forced_lock_ack = GEN11_VCS_SFC_LOCK_STATUS(engine);
		sfc_forced_lock_ack_bit  = GEN11_VCS_SFC_LOCK_ACK_BIT;

		sfc_usage = GEN11_VCS_SFC_LOCK_STATUS(engine);
		sfc_usage_bit = GEN11_VCS_SFC_USAGE_BIT;
		sfc_reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
		break;

	case VIDEO_ENHANCEMENT_CLASS:
		sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;

		sfc_forced_lock_ack = GEN11_VECS_SFC_LOCK_ACK(engine);
		sfc_forced_lock_ack_bit  = GEN11_VECS_SFC_LOCK_ACK_BIT;

		sfc_usage = GEN11_VECS_SFC_USAGE(engine);
		sfc_usage_bit = GEN11_VECS_SFC_USAGE_BIT;
		sfc_reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
		break;

	default:
		return 0;
	}

	/*
	 * Tell the engine that a software reset is going to happen. The engine
	 * will then try to force lock the SFC (if currently locked, it will
	 * remain so until we tell the engine it is safe to unlock; if currently
	 * unlocked, it will ignore this and all new lock requests). If SFC
	 * ends up being locked to the engine we want to reset, we have to reset
	 * it as well (we will unlock it once the reset sequence is completed).
	 */
	rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);

	if (__intel_wait_for_register_fw(uncore,
					 sfc_forced_lock_ack,
					 sfc_forced_lock_ack_bit,
					 sfc_forced_lock_ack_bit,
					 1000, 0, NULL)) {
		DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
		return 0;
	}

	if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
		return sfc_reset_bit;

	return 0;
}

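/* Release the SFC force-lock taken by gen11_lock_sfc() once the reset is done. */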
static void gen11_unlock_sfc(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
	i915_reg_t sfc_forced_lock;
	u32 sfc_forced_lock_bit;

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
			return;

		sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
		break;

	case VIDEO_ENHANCEMENT_CLASS:
		sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
		break;

	default:
		return;
	}

	rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
}

static int gen11_reset_engines(struct drm_i915_private *i915,
			       intel_engine_mask_t engine_mask,
			       unsigned int retry)
{
	const u32 hw_engine_mask[] = {
		[RCS0]  = GEN11_GRDOM_RENDER,
		[BCS0]  = GEN11_GRDOM_BLT,
		[VCS0]  = GEN11_GRDOM_MEDIA,
		[VCS1]  = GEN11_GRDOM_MEDIA2,
		[VCS2]  = GEN11_GRDOM_MEDIA3,
		[VCS3]  = GEN11_GRDOM_MEDIA4,
		[VECS0] = GEN11_GRDOM_VECS,
		[VECS1] = GEN11_GRDOM_VECS2,
	};
	struct intel_engine_cs *engine;
	intel_engine_mask_t tmp;
	u32 hw_mask;
	int ret;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN11_GRDOM_FULL;
	} else {
		hw_mask = 0;
		for_each_engine_masked(engine, i915, engine_mask, tmp) {
			GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
			hw_mask |= hw_engine_mask[engine->id];
			hw_mask |= gen11_lock_sfc(engine);
		}
	}

	ret = gen6_hw_domain_reset(i915, hw_mask);

	if (engine_mask != ALL_ENGINES)
		for_each_engine_masked(engine, i915, engine_mask, tmp)
			gen11_unlock_sfc(engine);

	return ret;
}

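/*
 * Ask the engine to quiesce and acknowledge readiness for reset via
 * RING_RESET_CTL; catastrophic errors bypass the handshake (HAS#396813).
 */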
static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base);
	u32 request, mask, ack;
	int ret;

	ack = intel_uncore_read_fw(uncore, reg);
	if (ack & RESET_CTL_CAT_ERROR) {
		/*
		 * For catastrophic errors, ready-for-reset sequence
		 * needs to be bypassed: HAS#396813
		 */
		request = RESET_CTL_CAT_ERROR;
		mask = RESET_CTL_CAT_ERROR;

		/* Catastrophic errors need to be cleared by HW */
		ack = 0;
	} else if (!(ack & RESET_CTL_READY_TO_RESET)) {
		request = RESET_CTL_REQUEST_RESET;
		mask = RESET_CTL_READY_TO_RESET;
		ack = RESET_CTL_READY_TO_RESET;
	} else {
		return 0;
	}

	intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request));
	ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
					   700, 0, NULL);
	if (ret)
		DRM_ERROR("%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
			  engine->name, request,
			  intel_uncore_read_fw(uncore, reg));

	return ret;
}

static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
{
	intel_uncore_write_fw(engine->uncore,
			      RING_RESET_CTL(engine->mmio_base),
			      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}

static int gen8_reset_engines(struct drm_i915_private *i915,
			      intel_engine_mask_t engine_mask,
			      unsigned int retry)
{
	struct intel_engine_cs *engine;
	const bool reset_non_ready = retry >= 1;
	intel_engine_mask_t tmp;
	int ret;

	for_each_engine_masked(engine, i915, engine_mask, tmp) {
		ret = gen8_engine_reset_prepare(engine);
		if (ret && !reset_non_ready)
			goto skip_reset;

		/*
		 * If this is not the first failed attempt to prepare,
		 * we decide to proceed anyway.
		 *
		 * By doing so we risk context corruption and with
		 * some gens (kbl), possible system hang if reset
		 * happens during active bb execution.
		 *
		 * We would rather take context corruption than a failed
		 * reset with a wedged driver/gpu. The case of an active
		 * batchbuffer should already be covered by the
		 * i915_stop_engines() call made before the reset.
		 */
	}

	if (INTEL_GEN(i915) >= 11)
		ret = gen11_reset_engines(i915, engine_mask, retry);
	else
		ret = gen6_reset_engines(i915, engine_mask, retry);

skip_reset:
	for_each_engine_masked(engine, i915, engine_mask, tmp)
		gen8_engine_reset_cancel(engine);

	return ret;
}

typedef int (*reset_func)(struct drm_i915_private *,
			  intel_engine_mask_t engine_mask,
			  unsigned int retry);

static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 8)
		return gen8_reset_engines;
	else if (INTEL_GEN(i915) >= 6)
		return gen6_reset_engines;
	else if (INTEL_GEN(i915) >= 5)
		return ironlake_do_reset;
	else if (IS_G4X(i915))
		return g4x_do_reset;
	else if (IS_G33(i915) || IS_PINEVIEW(i915))
		return g33_do_reset;
	else if (INTEL_GEN(i915) >= 3)
		return i915_do_reset;
	else
		return NULL;
}

int intel_gpu_reset(struct drm_i915_private *i915,
		    intel_engine_mask_t engine_mask)
{
	const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
	reset_func reset;
	int ret = -ETIMEDOUT;
	int retry;

	reset = intel_get_gpu_reset(i915);
	if (!reset)
		return -ENODEV;

	/*
	 * If the power well sleeps during the reset, the reset
	 * request may be dropped and never completes (causing -EIO).
	 */
	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
	for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
		/*
		 * We stop the engines first, otherwise we might get a failed
		 * reset and a dead gpu (on elk). Even a GPU as modern as kbl
		 * can suffer a system hang if a batchbuffer is still running
		 * when the reset is issued, regardless of the READY_TO_RESET
		 * ack.
		 * Thus assume it is best to stop engines on all gens
		 * where we have a gpu reset.
		 *
		 * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
		 *
		 * WaMediaResetMainRingCleanup:ctg,elk (presumably)
		 *
		 * FIXME: Wa for more modern gens needs to be validated
		 */
		if (retry)
			i915_stop_engines(i915, engine_mask);

		GEM_TRACE("engine_mask=%x\n", engine_mask);
		preempt_disable();
		ret = reset(i915, engine_mask, retry);
		preempt_enable();
	}
	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);

	return ret;
}

bool intel_has_gpu_reset(struct drm_i915_private *i915)
{
	if (!i915_modparams.reset)
		return false;

	return intel_get_gpu_reset(i915);
}

bool intel_has_reset_engine(struct drm_i915_private *i915)
{
	return INTEL_INFO(i915)->has_reset_engine && i915_modparams.reset >= 2;
}

int intel_reset_guc(struct drm_i915_private *i915)
{
	u32 guc_domain =
		INTEL_GEN(i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
	int ret;

	GEM_BUG_ON(!HAS_GUC(i915));

	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
	ret = gen6_hw_domain_reset(i915, guc_domain);
	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);

	return ret;
}

/*
 * Ensure the irq handler finishes, and is not run again.
 * Also return the active request so that we only search for it once.
 */
static void reset_prepare_engine(struct intel_engine_cs *engine)
{
	/*
	 * During the reset sequence, we must prevent the engine from
	 * entering RC6. As the context state is undefined until we restart
	 * the engine, if it does enter RC6 during the reset, the state
	 * written to the powercontext is undefined and so we may lose
	 * GPU state upon resume, i.e. fail to restart after a reset.
	 */
	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
	engine->reset.prepare(engine);
}

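/*
 * Zap userspace mmaps that rely on a fence register so that the affected
 * clients refault, and thereby revalidate their GGTT bindings, after reset.
 */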
static void revoke_mmaps(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < i915->num_fence_regs; i++) {
		struct drm_vma_offset_node *node;
		struct i915_vma *vma;
		u64 vma_offset;

		vma = READ_ONCE(i915->fence_regs[i].vma);
		if (!vma)
			continue;

		if (!i915_vma_has_userfault(vma))
			continue;

		GEM_BUG_ON(vma->fence != &i915->fence_regs[i]);
		node = &vma->obj->base.vma_node;
		vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
		unmap_mapping_range(i915->drm.anon_inode->i_mapping,
				    drm_vma_node_offset_addr(node) + vma_offset,
				    vma->size,
				    1);
	}
}

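/* Run each engine's reset.prepare() hook and notify the GuC before reset. */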
static void reset_prepare(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		reset_prepare_engine(engine);

	intel_uc_reset_prepare(i915);
}

static void gt_revoke(struct drm_i915_private *i915)
{
	revoke_mmaps(i915);
}

static int gt_reset(struct drm_i915_private *i915,
		    intel_engine_mask_t stalled_mask)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.
	 */
	err = i915_ggtt_enable_hw(i915);
	if (err)
		return err;

	for_each_engine(engine, i915, id)
		intel_engine_reset(engine, stalled_mask & engine->mask);

	i915_gem_restore_fences(i915);

	return err;
}

static void reset_finish_engine(struct intel_engine_cs *engine)
{
	engine->reset.finish(engine);
	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
}

struct i915_gpu_restart {
	struct work_struct work;
	struct drm_i915_private *i915;
};

static void restart_work(struct work_struct *work)
{
	struct i915_gpu_restart *arg = container_of(work, typeof(*arg), work);
	struct drm_i915_private *i915 = arg->i915;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(i915);
	mutex_lock(&i915->drm.struct_mutex);
	WRITE_ONCE(i915->gpu_error.restart, NULL);

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		/*
		 * Ostensibly, we always want a context loaded for powersaving,
		 * so if the engine is idle after the reset, send a request
		 * to load our scratch kernel_context.
		 */
		if (!intel_engine_is_idle(engine))
			continue;

		rq = i915_request_create(engine->kernel_context);
		if (!IS_ERR(rq))
			i915_request_add(rq);
	}

	mutex_unlock(&i915->drm.struct_mutex);
	intel_runtime_pm_put(i915, wakeref);

	kfree(arg);
}

static void reset_finish(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		reset_finish_engine(engine);
		intel_engine_signal_breadcrumbs(engine);
	}
}

static void reset_restart(struct drm_i915_private *i915)
{
	struct i915_gpu_restart *arg;

	/*
	 * Following the reset, ensure that we always reload context for
	 * powersaving, and to correct engine->last_retired_context. Since
	 * this requires us to submit a request, queue a worker to do that
	 * task for us to evade any locking here.
	 */
	if (READ_ONCE(i915->gpu_error.restart))
		return;

	arg = kmalloc(sizeof(*arg), GFP_KERNEL);
	if (arg) {
		arg->i915 = i915;
		INIT_WORK(&arg->work, restart_work);

		WRITE_ONCE(i915->gpu_error.restart, arg);
		queue_work(i915->wq, &arg->work);
	}
}

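/*
 * Once wedged, every new submission is completed immediately with -EIO
 * instead of being executed, so that waiters can still make forward progress.
 */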
static void nop_submit_request(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	GEM_TRACE("%s fence %llx:%lld -> -EIO\n",
		  engine->name, request->fence.context, request->fence.seqno);
	dma_fence_set_error(&request->fence, -EIO);

	spin_lock_irqsave(&engine->timeline.lock, flags);
	__i915_request_submit(request);
	i915_request_mark_complete(request);
	spin_unlock_irqrestore(&engine->timeline.lock, flags);

	intel_engine_queue_breadcrumbs(engine);
}

static void __i915_gem_set_wedged(struct drm_i915_private *i915)
{
	struct i915_gpu_error *error = &i915->gpu_error;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (test_bit(I915_WEDGED, &error->flags))
		return;

	if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(i915)) {
		struct drm_printer p = drm_debug_printer(__func__);

		for_each_engine(engine, i915, id)
			intel_engine_dump(engine, &p, "%s\n", engine->name);
	}

	GEM_TRACE("start\n");

	/*
	 * First, stop submission to hw, but do not yet complete requests by
	 * rolling the global seqno forward (since this would complete requests
	 * for which we haven't set the fence error to EIO yet).
	 */
	reset_prepare(i915);

	/* Even if the GPU reset fails, it should still stop the engines */
	if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
		intel_gpu_reset(i915, ALL_ENGINES);

	for_each_engine(engine, i915, id) {
		engine->submit_request = nop_submit_request;
		engine->schedule = NULL;
	}
	i915->caps.scheduler = 0;

	/*
	 * Make sure no request can slip through without getting completed by
	 * either this wait here or the nop_submit_request() now installed on
	 * every engine.
	 */
	synchronize_rcu_expedited();

	/* Mark all executing requests as skipped */
	for_each_engine(engine, i915, id)
		engine->cancel_requests(engine);

	reset_finish(i915);

	smp_mb__before_atomic();
	set_bit(I915_WEDGED, &error->flags);

	GEM_TRACE("end\n");
}

void i915_gem_set_wedged(struct drm_i915_private *i915)
{
	struct i915_gpu_error *error = &i915->gpu_error;
	intel_wakeref_t wakeref;

	mutex_lock(&error->wedge_mutex);
	with_intel_runtime_pm(i915, wakeref)
		__i915_gem_set_wedged(i915);
	mutex_unlock(&error->wedge_mutex);
}

static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
{
	struct i915_gpu_error *error = &i915->gpu_error;
	struct i915_timeline *tl;

	if (!test_bit(I915_WEDGED, &error->flags))
		return true;

	if (!i915->gt.scratch) /* Never fully initialised, recovery impossible */
		return false;

	GEM_TRACE("start\n");

	/*
	 * Before unwedging, make sure that all pending operations
	 * are flushed and errored out - we may have requests waiting upon
	 * third party fences. We marked all inflight requests as EIO, and
	 * every execbuf since returned EIO, for consistency we want all
	 * the currently pending requests to also be marked as EIO, which
	 * is done inside our nop_submit_request - and so we must wait.
	 *
	 * No more can be submitted until we reset the wedged bit.
	 */
	mutex_lock(&i915->gt.timelines.mutex);
	list_for_each_entry(tl, &i915->gt.timelines.active_list, link) {
		struct i915_request *rq;

		rq = i915_active_request_get_unlocked(&tl->last_request);
		if (!rq)
			continue;

		/*
		 * All internal dependencies (i915_requests) will have
		 * been flushed by the set-wedge, but we may be stuck waiting
		 * for external fences. These should all be capped to 10s
		 * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
		 * in the worst case.
		 */
		dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT);
		i915_request_put(rq);
	}
	mutex_unlock(&i915->gt.timelines.mutex);

	intel_engines_sanitize(i915, false);

	/*
	 * Undo nop_submit_request. We prevent all new i915 requests from
	 * being queued (by disallowing execbuf whilst wedged) so having
	 * waited for all active requests above, we know the system is idle
	 * and do not have to worry about a thread being inside
	 * engine->submit_request() as we swap over. So unlike installing
	 * the nop_submit_request on reset, we can do this from normal
	 * context and do not require stop_machine().
	 */
	intel_engines_reset_default_submission(i915);

	GEM_TRACE("end\n");

	smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
	clear_bit(I915_WEDGED, &i915->gpu_error.flags);

	return true;
}

bool i915_gem_unset_wedged(struct drm_i915_private *i915)
{
	struct i915_gpu_error *error = &i915->gpu_error;
	bool result;

	mutex_lock(&error->wedge_mutex);
	result = __i915_gem_unset_wedged(i915);
	mutex_unlock(&error->wedge_mutex);

	return result;
}

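/*
 * Revoke userspace mmaps, perform the chip reset (retrying with a small
 * backoff on failure) and then restore the GT state via gt_reset().
 */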
static int do_reset(struct drm_i915_private *i915,
		    intel_engine_mask_t stalled_mask)
{
	int err, i;

	gt_revoke(i915);

	err = intel_gpu_reset(i915, ALL_ENGINES);
	for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
		msleep(10 * (i + 1));
		err = intel_gpu_reset(i915, ALL_ENGINES);
	}
	if (err)
		return err;

	return gt_reset(i915, stalled_mask);
}

/**
 * i915_reset - reset chip after a hang
 * @i915: #drm_i915_private to reset
 * @stalled_mask: mask of the stalled engines with the guilty requests
 * @reason: user error message for why we are resetting
 *
 * Reset the chip.  Useful if a hang is detected. Marks the device as wedged
 * on failure.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
void i915_reset(struct drm_i915_private *i915,
		intel_engine_mask_t stalled_mask,
		const char *reason)
{
	struct i915_gpu_error *error = &i915->gpu_error;
	int ret;

	GEM_TRACE("flags=%lx\n", error->flags);

	might_sleep();
	assert_rpm_wakelock_held(i915);
	GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));

	/* Clear any previous failed attempts at recovery. Time to try again. */
	if (!__i915_gem_unset_wedged(i915))
		return;

	if (reason)
		dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
	error->reset_count++;

	reset_prepare(i915);

	if (!intel_has_gpu_reset(i915)) {
		if (i915_modparams.reset)
			dev_err(i915->drm.dev, "GPU reset not supported\n");
		else
			DRM_DEBUG_DRIVER("GPU reset disabled\n");
		goto error;
	}

	if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
		intel_runtime_pm_disable_interrupts(i915);

	if (do_reset(i915, stalled_mask)) {
		dev_err(i915->drm.dev, "Failed to reset chip\n");
		goto taint;
	}

	if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
		intel_runtime_pm_enable_interrupts(i915);

	intel_overlay_reset(i915);

	/*
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	ret = i915_gem_init_hw(i915);
	if (ret) {
		DRM_ERROR("Failed to initialise HW following reset (%d)\n",
			  ret);
		goto error;
	}

	i915_queue_hangcheck(i915);

finish:
	reset_finish(i915);
	if (!__i915_wedged(error))
		reset_restart(i915);
	return;

taint:
	/*
	 * History tells us that if we cannot reset the GPU now, we
	 * never will. This then impacts everything that is run
	 * subsequently. On failing the reset, we mark the driver
	 * as wedged, preventing further execution on the GPU.
	 * We also want to go one step further and add a taint to the
	 * kernel so that any subsequent faults can be traced back to
	 * this failure. This is important for CI, where if the
	 * GPU/driver fails we would like to reboot and restart testing
	 * rather than continue on into oblivion. For everyone else,
	 * the system should still plod along, but they have been warned!
	 */
	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
error:
	__i915_gem_set_wedged(i915);
	goto finish;
}

static inline int intel_gt_reset_engine(struct drm_i915_private *i915,
					struct intel_engine_cs *engine)
{
	return intel_gpu_reset(i915, engine->mask);
}

/**
 * i915_reset_engine - reset GPU engine to recover from a hang
 * @engine: engine to reset
 * @msg: reason for GPU reset; or NULL for no dev_notice()
 *
 * Reset a specific GPU engine. Useful if a hang is detected.
 * Returns zero on successful reset or otherwise an error code.
 *
 * Procedure is:
 *  - identifies the request that caused the hang and it is dropped
 *  - reset engine (which will force the engine to idle)
 *  - re-init/configure engine
 */
int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
{
	struct i915_gpu_error *error = &engine->i915->gpu_error;
	int ret;

	GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));

	reset_prepare_engine(engine);

	if (msg)
		dev_notice(engine->i915->drm.dev,
			   "Resetting %s for %s\n", engine->name, msg);
	error->reset_engine_count[engine->id]++;

	if (!engine->i915->guc.execbuf_client)
		ret = intel_gt_reset_engine(engine->i915, engine);
	else
		ret = intel_guc_reset_engine(&engine->i915->guc, engine);
	if (ret) {
		/* If we fail here, we expect to fallback to a global reset */
		DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
				 engine->i915->guc.execbuf_client ? "GuC " : "",
				 engine->name, ret);
		goto out;
	}

	/*
	 * The request that caused the hang is stuck on elsp; we know the
	 * active request and can drop it, adjusting HEAD to skip the
	 * offending request so that the remaining requests in the queue
	 * resume execution.
	 */
	intel_engine_reset(engine, true);

	/*
	 * The engine and its registers (and workarounds in case of render)
	 * have been reset to their default values. Follow the init_ring
	 * process to program RING_MODE, HWSP and re-enable submission.
	 */
	ret = engine->init_hw(engine);
	if (ret)
		goto out;

out:
	intel_engine_cancel_stop_cs(engine);
	reset_finish_engine(engine);
	return ret;
}

static void i915_reset_device(struct drm_i915_private *i915,
			      u32 engine_mask,
			      const char *reason)
{
	struct i915_gpu_error *error = &i915->gpu_error;
	struct kobject *kobj = &i915->drm.primary->kdev->kobj;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	struct i915_wedge_me w;

	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);

	DRM_DEBUG_DRIVER("resetting chip\n");
	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);

	/* Use a watchdog to ensure that our reset completes */
	i915_wedge_on_timeout(&w, i915, 5 * HZ) {
		intel_prepare_reset(i915);

		/* Flush everyone using a resource about to be clobbered */
		synchronize_srcu_expedited(&error->reset_backoff_srcu);

		mutex_lock(&error->wedge_mutex);
		i915_reset(i915, engine_mask, reason);
		mutex_unlock(&error->wedge_mutex);

		intel_finish_reset(i915);
	}

	if (!test_bit(I915_WEDGED, &error->flags))
		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
}

static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
{
	intel_uncore_rmw(uncore, reg, 0, 0);
}

void i915_clear_error_registers(struct drm_i915_private *i915)
{
	struct intel_uncore *uncore = &i915->uncore;
	u32 eir;

	if (!IS_GEN(i915, 2))
		clear_register(uncore, PGTBL_ER);

	if (INTEL_GEN(i915) < 4)
		clear_register(uncore, IPEIR(RENDER_RING_BASE));
	else
		clear_register(uncore, IPEIR_I965);

	clear_register(uncore, EIR);
	eir = intel_uncore_read(uncore, EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
		rmw_set(uncore, EMR, eir);
		intel_uncore_write(uncore, GEN2_IIR,
				   I915_MASTER_ERROR_INTERRUPT);
	}

	if (INTEL_GEN(i915) >= 8) {
		rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
		intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
	} else if (INTEL_GEN(i915) >= 6) {
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		for_each_engine(engine, i915, id) {
			rmw_clear(uncore,
				  RING_FAULT_REG(engine), RING_FAULT_VALID);
			intel_uncore_posting_read(uncore,
						  RING_FAULT_REG(engine));
		}
	}
}

/**
 * i915_handle_error - handle a gpu error
 * @i915: i915 device private
 * @engine_mask: mask representing engines that are hung
 * @flags: control flags
 * @fmt: Error message format string
 *
 * Do some basic checking of register state at error time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_i915_private *i915,
		       intel_engine_mask_t engine_mask,
		       unsigned long flags,
		       const char *fmt, ...)
{
	struct i915_gpu_error *error = &i915->gpu_error;
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	intel_engine_mask_t tmp;
	char error_msg[80];
	char *msg = NULL;

	if (fmt) {
		va_list args;

		va_start(args, fmt);
		vscnprintf(error_msg, sizeof(error_msg), fmt, args);
		va_end(args);

		msg = error_msg;
	}

	/*
	 * In most cases it's guaranteed that we get here with an RPM
	 * reference held, for example because there is a pending GPU
	 * request that won't finish until the reset is done. This
	 * isn't the case at least when we get here by doing a
	 * simulated reset via debugfs, so get an RPM reference.
	 */
	wakeref = intel_runtime_pm_get(i915);

	engine_mask &= INTEL_INFO(i915)->engine_mask;

	if (flags & I915_ERROR_CAPTURE) {
		i915_capture_error_state(i915, engine_mask, msg);
		i915_clear_error_registers(i915);
	}

	/*
	 * Try engine reset when available. We fall back to full reset if
	 * single reset fails.
	 */
	if (intel_has_reset_engine(i915) && !__i915_wedged(error)) {
		for_each_engine_masked(engine, i915, engine_mask, tmp) {
			BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
			if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
					     &error->flags))
				continue;

			if (i915_reset_engine(engine, msg) == 0)
				engine_mask &= ~engine->mask;

			clear_bit(I915_RESET_ENGINE + engine->id,
				  &error->flags);
			wake_up_bit(&error->flags,
				    I915_RESET_ENGINE + engine->id);
		}
	}

	if (!engine_mask)
		goto out;

	/* Full reset needs the mutex, stop any other user trying to do so. */
	if (test_and_set_bit(I915_RESET_BACKOFF, &error->flags)) {
		wait_event(error->reset_queue,
			   !test_bit(I915_RESET_BACKOFF, &error->flags));
		goto out; /* piggy-back on the other reset */
	}

	/* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */
	synchronize_rcu_expedited();

	/* Prevent any other reset-engine attempt. */
	for_each_engine(engine, i915, tmp) {
		while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
					&error->flags))
			wait_on_bit(&error->flags,
				    I915_RESET_ENGINE + engine->id,
				    TASK_UNINTERRUPTIBLE);
	}

	i915_reset_device(i915, engine_mask, msg);

	for_each_engine(engine, i915, tmp) {
		clear_bit(I915_RESET_ENGINE + engine->id,
			  &error->flags);
	}

	clear_bit(I915_RESET_BACKOFF, &error->flags);
	wake_up_all(&error->reset_queue);

out:
	intel_runtime_pm_put(i915, wakeref);
}

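/*
 * Wait for any global reset (I915_RESET_BACKOFF) to finish, then enter the
 * reset_backoff_srcu read-side critical section; the returned tag must be
 * passed back to i915_reset_unlock().
 */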
int i915_reset_trylock(struct drm_i915_private *i915)
{
	struct i915_gpu_error *error = &i915->gpu_error;
	int srcu;

	might_lock(&error->reset_backoff_srcu);
	might_sleep();

	rcu_read_lock();
	while (test_bit(I915_RESET_BACKOFF, &error->flags)) {
		rcu_read_unlock();

		if (wait_event_interruptible(error->reset_queue,
					     !test_bit(I915_RESET_BACKOFF,
						       &error->flags)))
			return -EINTR;

		rcu_read_lock();
	}
	srcu = srcu_read_lock(&error->reset_backoff_srcu);
	rcu_read_unlock();

	return srcu;
}

void i915_reset_unlock(struct drm_i915_private *i915, int tag)
__releases(&i915->gpu_error.reset_backoff_srcu)
{
	struct i915_gpu_error *error = &i915->gpu_error;

	srcu_read_unlock(&error->reset_backoff_srcu, tag);
}

int i915_terminally_wedged(struct drm_i915_private *i915)
{
	struct i915_gpu_error *error = &i915->gpu_error;

	might_sleep();

	if (!__i915_wedged(error))
		return 0;

	/* Reset still in progress? Maybe we will recover? */
	if (!test_bit(I915_RESET_BACKOFF, &error->flags))
		return -EIO;

	/* XXX intel_reset_finish() still takes struct_mutex!!! */
	if (mutex_is_locked(&i915->drm.struct_mutex))
		return -EAGAIN;

	if (wait_event_interruptible(error->reset_queue,
				     !test_bit(I915_RESET_BACKOFF,
					       &error->flags)))
		return -EINTR;

	return __i915_wedged(error) ? -EIO : 0;
}

bool i915_reset_flush(struct drm_i915_private *i915)
{
	int err;

	cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);

	flush_workqueue(i915->wq);
	GEM_BUG_ON(READ_ONCE(i915->gpu_error.restart));

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_LOCKED |
				     I915_WAIT_FOR_IDLE_BOOST,
				     MAX_SCHEDULE_TIMEOUT);
	mutex_unlock(&i915->drm.struct_mutex);

	return !err;
}

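/*
 * Timeout callback armed by i915_wedge_on_timeout(): if the guarded reset
 * has not completed in time, give up and declare the GPU wedged.
 */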
static void i915_wedge_me(struct work_struct *work)
{
	struct i915_wedge_me *w = container_of(work, typeof(*w), work.work);

	dev_err(w->i915->drm.dev,
		"%s timed out, cancelling all in-flight rendering.\n",
		w->name);
	i915_gem_set_wedged(w->i915);
}

void __i915_init_wedge(struct i915_wedge_me *w,
		       struct drm_i915_private *i915,
		       long timeout,
		       const char *name)
{
	w->i915 = i915;
	w->name = name;

	INIT_DELAYED_WORK_ONSTACK(&w->work, i915_wedge_me);
	schedule_delayed_work(&w->work, timeout);
}

void __i915_fini_wedge(struct i915_wedge_me *w)
{
	cancel_delayed_work_sync(&w->work);
	destroy_delayed_work_on_stack(&w->work);
	w->i915 = NULL;
}