// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <drm/drm_managed.h>
#include <drm/intel-gtt.h>

#include "intel_gt_debugfs.h"

#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_gt.h"
#include "intel_gt_buffer_pool.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_migrate.h"
#include "intel_mocs.h"
#include "intel_pm.h"
#include "intel_rc6.h"
#include "intel_renderstate.h"
#include "intel_rps.h"
#include "intel_uncore.h"
#include "shmem_utils.h"
#include "pxp/intel_pxp.h"

void __intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
{
	spin_lock_init(&gt->irq_lock);

	mutex_init(&gt->tlb_invalidate_lock);

	INIT_LIST_HEAD(&gt->closed_vma);
	spin_lock_init(&gt->closed_lock);

	init_llist_head(&gt->watchdog.list);
	INIT_WORK(&gt->watchdog.work, intel_gt_watchdog_work);

	intel_gt_init_buffer_pool(gt);
	intel_gt_init_reset(gt);
	intel_gt_init_requests(gt);
	intel_gt_init_timelines(gt);
	intel_gt_pm_init_early(gt);

	intel_uc_init_early(&gt->uc);
	intel_rps_init_early(&gt->rps);
}

void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
{
	gt->i915 = i915;
	gt->uncore = &i915->uncore;
}

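/*
 * Probe for device local memory (LMEM) and register it as a memory region.
 * -ENODEV simply means there is no LMEM to set up and is not treated as an
 * error.
 */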
int intel_gt_probe_lmem(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_memory_region *mem;
	int id;
	int err;

	mem = intel_gt_setup_lmem(gt);
	if (IS_ERR(mem)) {
		err = PTR_ERR(mem);
		if (err == -ENODEV)
			return 0;

		drm_err(&i915->drm,
			"Failed to setup region(%d) type=%d\n",
			err, INTEL_MEMORY_LOCAL);
		return err;
	}

	id = INTEL_REGION_LMEM;

	mem->id = id;

	intel_memory_region_set_name(mem, "local%u", mem->instance);

	GEM_BUG_ON(!HAS_REGION(i915, id));
	GEM_BUG_ON(i915->mm.regions[id]);
	i915->mm.regions[id] = mem;

	return 0;
}

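/*
 * Allocate the GGTT structure for this GT. The allocation is tied to the
 * drm device (drmm_*), so it is released automatically on driver teardown.
 */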
int intel_gt_assign_ggtt(struct intel_gt *gt)
{
	gt->ggtt = drmm_kzalloc(&gt->i915->drm, sizeof(*gt->ggtt), GFP_KERNEL);

	return gt->ggtt ? 0 : -ENOMEM;
}

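/*
 * Per-platform tables of MMIO ranges that require explicit read steering.
 * Each table is terminated by an empty entry.
 */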
static const struct intel_mmio_range icl_l3bank_steering_table[] = {
	{ 0x00B100, 0x00B3FF },
	{},
};

static const struct intel_mmio_range xehpsdv_mslice_steering_table[] = {
	{ 0x004000, 0x004AFF },
	{ 0x00C800, 0x00CFFF },
	{ 0x00DD00, 0x00DDFF },
	{ 0x00E900, 0x00FFFF }, /* 0xEA00 - 0xEFFF is unused */
	{},
};

static const struct intel_mmio_range xehpsdv_lncf_steering_table[] = {
	{ 0x00B000, 0x00B0FF },
	{ 0x00D800, 0x00D8FF },
	{},
};

static const struct intel_mmio_range dg2_lncf_steering_table[] = {
	{ 0x00B000, 0x00B0FF },
	{ 0x00D880, 0x00D8FF },
	{},
};

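/*
 * Build a mask of the slices that contain at least one enabled DSS, given
 * the number of DSS per slice; used to derive the mslice mask below.
 */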
static u16 slicemask(struct intel_gt *gt, int count)
{
	u64 dss_mask = intel_sseu_get_subslices(&gt->info.sseu, 0);

	return intel_slicemask_from_dssmask(dss_mask, count);
}

int intel_gt_init_mmio(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	intel_gt_init_clock_frequency(gt);

	intel_uc_init_mmio(&gt->uc);
	intel_sseu_info_init(gt);

	/*
	 * An mslice is unavailable only if both the meml3 for the slice is
	 * disabled *and* all of the DSS in the slice (quadrant) are disabled.
	 */
	if (HAS_MSLICES(i915))
		gt->info.mslice_mask =
			slicemask(gt, GEN_DSS_PER_MSLICE) |
			(intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
			 GEN12_MEML3_EN_MASK);

	if (IS_DG2(i915)) {
		gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
		gt->steering_table[LNCF] = dg2_lncf_steering_table;
	} else if (IS_XEHPSDV(i915)) {
		gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
		gt->steering_table[LNCF] = xehpsdv_lncf_steering_table;
	} else if (GRAPHICS_VER(i915) >= 11 &&
		   GRAPHICS_VER_FULL(i915) < IP_VER(12, 50)) {
		gt->steering_table[L3BANK] = icl_l3bank_steering_table;
		gt->info.l3bank_mask =
			~intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
			GEN10_L3BANK_MASK;
	} else if (HAS_MSLICES(i915)) {
		MISSING_CASE(INTEL_INFO(i915)->platform);
	}

	return intel_engines_init_mmio(gt);
}

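/*
 * Quiesce rings the driver never uses by zeroing their control registers;
 * see the comment in intel_gt_init_hw() about stale rings blocking C3 entry.
 */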
static void init_unused_ring(struct intel_gt *gt, u32 base)
{
	struct intel_uncore *uncore = gt->uncore;

	intel_uncore_write(uncore, RING_CTL(base), 0);
	intel_uncore_write(uncore, RING_HEAD(base), 0);
	intel_uncore_write(uncore, RING_TAIL(base), 0);
	intel_uncore_write(uncore, RING_START(base), 0);
}

static void init_unused_rings(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	if (IS_I830(i915)) {
		init_unused_ring(gt, PRB1_BASE);
		init_unused_ring(gt, SRB0_BASE);
		init_unused_ring(gt, SRB1_BASE);
		init_unused_ring(gt, SRB2_BASE);
		init_unused_ring(gt, SRB3_BASE);
	} else if (GRAPHICS_VER(i915) == 2) {
		init_unused_ring(gt, SRB0_BASE);
		init_unused_ring(gt, SRB1_BASE);
	} else if (GRAPHICS_VER(i915) == 3) {
		init_unused_ring(gt, PRB1_BASE);
		init_unused_ring(gt, PRB2_BASE);
	}
}

int intel_gt_init_hw(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	gt->last_init_time = ktime_get();

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	if (HAS_EDRAM(i915) && GRAPHICS_VER(i915) < 9)
		intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));

	if (IS_HASWELL(i915))
		intel_uncore_write(uncore,
				   MI_PREDICATE_RESULT_2,
				   IS_HSW_GT3(i915) ?
				   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	/* Apply the GT workarounds... */
	intel_gt_apply_workarounds(gt);
	/* ...and determine whether they are sticking. */
	intel_gt_verify_workarounds(gt, "init");

	intel_gt_init_swizzling(gt);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (ie. head != tail) after resume which
	 * will prevent c3 entry. Make sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(gt);

	ret = i915_ppgtt_init_hw(gt);
	if (ret) {
		DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
		goto out;
	}

	/* We can't enable contexts until all firmware is loaded */
	ret = intel_uc_init_hw(&gt->uc);
	if (ret) {
		i915_probe_error(i915, "Enabling uc failed (%d)\n", ret);
		goto out;
	}

	intel_mocs_init(gt);

out:
	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
	return ret;
}

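/* Small convenience wrappers around intel_uncore_rmw() for the code below. */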
static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
	intel_uncore_rmw(uncore, reg, 0, set);
}

static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
	intel_uncore_rmw(uncore, reg, clr, 0);
}

static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
{
	intel_uncore_rmw(uncore, reg, 0, 0);
}

static void gen6_clear_engine_error_register(struct intel_engine_cs *engine)
{
	GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
	GEN6_RING_FAULT_REG_POSTING_READ(engine);
}

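/*
 * Clear any latched error state (PGTBL_ER, EIR and the ring fault registers)
 * so that stale errors are not reported again.
 */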
void
intel_gt_clear_error_registers(struct intel_gt *gt,
			       intel_engine_mask_t engine_mask)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	u32 eir;

	if (GRAPHICS_VER(i915) != 2)
		clear_register(uncore, PGTBL_ER);

	if (GRAPHICS_VER(i915) < 4)
		clear_register(uncore, IPEIR(RENDER_RING_BASE));
	else
		clear_register(uncore, IPEIR_I965);

	clear_register(uncore, EIR);
	eir = intel_uncore_read(uncore, EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
		rmw_set(uncore, EMR, eir);
		intel_uncore_write(uncore, GEN2_IIR,
				   I915_MASTER_ERROR_INTERRUPT);
	}

	if (GRAPHICS_VER(i915) >= 12) {
		rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID);
		intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
	} else if (GRAPHICS_VER(i915) >= 8) {
		rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
		intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
	} else if (GRAPHICS_VER(i915) >= 6) {
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		for_each_engine_masked(engine, gt, engine_mask, id)
			gen6_clear_engine_error_register(engine);
	}
}

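/*
 * Report any pending fault left in the per-engine fault registers (gen6-7).
 */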
static void gen6_check_faults(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 fault;

	for_each_engine(engine, gt, id) {
		fault = GEN6_RING_FAULT_REG_READ(engine);
		if (fault & RING_FAULT_VALID) {
			drm_dbg(&engine->i915->drm, "Unexpected fault\n"
				"\tAddr: 0x%08lx\n"
				"\tAddress space: %s\n"
				"\tSource ID: %d\n"
				"\tType: %d\n",
				fault & PAGE_MASK,
				fault & RING_FAULT_GTTSEL_MASK ?
				"GGTT" : "PPGTT",
				RING_FAULT_SRCID(fault),
				RING_FAULT_FAULT_TYPE(fault));
		}
	}
}

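/*
 * From gen8 onwards a single fault register covers all engines; gen12 moved
 * it (and the fault TLB data registers) to new offsets.
 */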
static void gen8_check_faults(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg;
	u32 fault;

	if (GRAPHICS_VER(gt->i915) >= 12) {
		fault_reg = GEN12_RING_FAULT_REG;
		fault_data0_reg = GEN12_FAULT_TLB_DATA0;
		fault_data1_reg = GEN12_FAULT_TLB_DATA1;
	} else {
		fault_reg = GEN8_RING_FAULT_REG;
		fault_data0_reg = GEN8_FAULT_TLB_DATA0;
		fault_data1_reg = GEN8_FAULT_TLB_DATA1;
	}

	fault = intel_uncore_read(uncore, fault_reg);
	if (fault & RING_FAULT_VALID) {
		u32 fault_data0, fault_data1;
		u64 fault_addr;

		fault_data0 = intel_uncore_read(uncore, fault_data0_reg);
		fault_data1 = intel_uncore_read(uncore, fault_data1_reg);

		fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
			     ((u64)fault_data0 << 12);

		drm_dbg(&uncore->i915->drm, "Unexpected fault\n"
			"\tAddr: 0x%08x_%08x\n"
			"\tAddress space: %s\n"
			"\tEngine ID: %d\n"
			"\tSource ID: %d\n"
			"\tType: %d\n",
			upper_32_bits(fault_addr), lower_32_bits(fault_addr),
			fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
			GEN8_RING_FAULT_ENGINE_ID(fault),
			RING_FAULT_SRCID(fault),
			RING_FAULT_FAULT_TYPE(fault));
	}
}

void intel_gt_check_and_clear_faults(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	/* From GEN8 onwards we only have one 'All Engine Fault Register' */
	if (GRAPHICS_VER(i915) >= 8)
		gen8_check_faults(gt);
	else if (GRAPHICS_VER(i915) >= 6)
		gen6_check_faults(gt);
	else
		return;

	intel_gt_clear_error_registers(gt, ALL_ENGINES);
}

void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	intel_wakeref_t wakeref;

	/*
	 * No actual flushing is required for the GTT write domain for reads
	 * from the GTT domain. Writes to it "immediately" go to main memory
	 * as far as we know, so there's no chipset flush. It also doesn't
	 * land in the GPU render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour, until Cannonlake
	 * that was!).
	 */

	wmb();

	if (INTEL_INFO(gt->i915)->has_coherent_ggtt)
		return;

	intel_gt_chipset_flush(gt);

	with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref) {
		unsigned long flags;

		spin_lock_irqsave(&uncore->lock, flags);
		intel_uncore_posting_read_fw(uncore,
					     RING_HEAD(RENDER_RING_BASE));
		spin_unlock_irqrestore(&uncore->lock, flags);
	}
}

void intel_gt_chipset_flush(struct intel_gt *gt)
{
	wmb();
	if (GRAPHICS_VER(gt->i915) < 6)
		intel_gtt_chipset_flush();
}

void intel_gt_driver_register(struct intel_gt *gt)
{
	intel_rps_driver_register(&gt->rps);

	intel_gt_debugfs_register(gt);
}

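/*
 * Allocate and pin a scratch buffer in the GGTT, trying LMEM first and then
 * falling back to stolen and finally internal memory.
 */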
static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
{
	struct drm_i915_private *i915 = gt->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_VOLATILE);
	if (IS_ERR(obj))
		obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(obj))
		obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj)) {
		drm_err(&i915->drm, "Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);
	if (ret)
		goto err_unref;

	gt->scratch = i915_vma_make_unshrinkable(vma);

	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}

static void intel_gt_fini_scratch(struct intel_gt *gt)
{
	i915_vma_unpin_and_release(&gt->scratch, 0);
}

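/*
 * Select the default address space for the GT: a full PPGTT where supported,
 * otherwise a reference to the global GTT.
 */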
static struct i915_address_space *kernel_vm(struct intel_gt *gt)
{
	if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING)
		return &i915_ppgtt_create(gt, I915_BO_ALLOC_PM_EARLY)->vm;
	else
		return i915_vm_get(&gt->ggtt->vm);
}

static int __engines_record_defaults(struct intel_gt *gt)
{
	struct i915_request *requests[I915_NUM_ENGINES] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * As we reset the gpu during very early sanitisation, the current
	 * register state on the GPU should reflect its defaults values.
	 * We load a context onto the hw (with restore-inhibit), then switch
	 * over to a second context to save that default register state. We
	 * can then prime every new context with that state so they all start
	 * from the same default HW values.
	 */

	for_each_engine(engine, gt, id) {
		struct intel_renderstate so;
		struct intel_context *ce;
		struct i915_request *rq;

		/* We must be able to switch to something! */
		GEM_BUG_ON(!engine->kernel_context);

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			goto out;
		}

		err = intel_renderstate_init(&so, ce);
		if (err)
			goto err;

		rq = i915_request_create(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_fini;
		}

		err = intel_engine_emit_ctx_wa(rq);
		if (err)
			goto err_rq;

		err = intel_renderstate_emit(&so, rq);
		if (err)
			goto err_rq;

err_rq:
		requests[id] = i915_request_get(rq);
		i915_request_add(rq);
err_fini:
		intel_renderstate_fini(&so, ce);
err:
		if (err) {
			intel_context_put(ce);
			goto out;
		}
	}

	/* Flush the default context image to memory, and enable powersaving. */
	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
		err = -EIO;
		goto out;
	}

	for (id = 0; id < ARRAY_SIZE(requests); id++) {
		struct i915_request *rq;
		struct file *state;

		rq = requests[id];
		if (!rq)
			continue;

		if (rq->fence.error) {
			err = -EIO;
			goto out;
		}

		GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
		if (!rq->context->state)
			continue;

		/* Keep a copy of the state's backing pages; free the obj */
		state = shmem_create_from_object(rq->context->state->obj);
		if (IS_ERR(state)) {
			err = PTR_ERR(state);
			goto out;
		}
		rq->engine->default_state = state;
	}

out:
	/*
	 * If we have to abandon now, we expect the engines to be idle
	 * and ready to be torn-down. The quickest way we can accomplish
	 * this is by declaring ourselves wedged.
	 */
	if (err)
		intel_gt_set_wedged(gt);

	for (id = 0; id < ARRAY_SIZE(requests); id++) {
		struct intel_context *ce;
		struct i915_request *rq;

		rq = requests[id];
		if (!rq)
			continue;

		ce = rq->context;
		i915_request_put(rq);
		intel_context_put(ce);
	}
	return err;
}

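/*
 * Re-check that the engine workarounds are still applied; only enabled for
 * CONFIG_DRM_I915_DEBUG_GEM builds.
 */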
static int __engines_verify_workarounds(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return 0;

	for_each_engine(engine, gt, id) {
		if (intel_engine_verify_workarounds(engine, "load"))
			err = -EIO;
	}

	/* Flush and restore the kernel context for safety */
	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME)
		err = -EIO;

	return err;
}

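/*
 * Wedge the GT and flush outstanding work so that nothing is left running
 * by the time the GT is powered down.
 */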
static void __intel_gt_disable(struct intel_gt *gt)
{
	intel_gt_set_wedged_on_fini(gt);

	intel_gt_suspend_prepare(gt);
	intel_gt_suspend_late(gt);

	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
}

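/*
 * Retire outstanding requests until the GT is idle or the timeout expires,
 * then spend whatever time remains waiting for the uC to become idle as well.
 */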
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
{
	long remaining_timeout;

	/* If the device is asleep, we have no requests outstanding */
	if (!intel_gt_pm_is_awake(gt))
		return 0;

	while ((timeout = intel_gt_retire_requests_timeout(gt, timeout,
							   &remaining_timeout)) > 0) {
		cond_resched();
		if (signal_pending(current))
			return -EINTR;
	}

	return timeout ? timeout : intel_uc_wait_for_idle(&gt->uc,
							  remaining_timeout);
}

int intel_gt_init(struct intel_gt *gt)
{
	int err;

	err = i915_inject_probe_error(gt->i915, -ENODEV);
	if (err)
		return err;

	intel_gt_init_workarounds(gt);

	/*
	 * This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);

	err = intel_gt_init_scratch(gt,
				    GRAPHICS_VER(gt->i915) == 2 ? SZ_256K : SZ_4K);
	if (err)
		goto out_fw;

	intel_gt_pm_init(gt);

	gt->vm = kernel_vm(gt);
	if (!gt->vm) {
		err = -ENOMEM;
		goto err_pm;
	}

	intel_set_mocs_index(gt);

	err = intel_engines_init(gt);
	if (err)
		goto err_engines;

	err = intel_uc_init(&gt->uc);
	if (err)
		goto err_engines;
705 706 707

	err = intel_gt_resume(gt);
	if (err)
		goto err_uc_init;

	err = __engines_record_defaults(gt);
	if (err)
		goto err_gt;

	err = __engines_verify_workarounds(gt);
	if (err)
		goto err_gt;

	intel_uc_init_late(&gt->uc);

	err = i915_inject_probe_error(gt->i915, -EIO);
	if (err)
		goto err_gt;

	intel_migrate_init(&gt->migrate, gt);

	intel_pxp_init(&gt->pxp);

	goto out_fw;
err_gt:
	__intel_gt_disable(gt);
	intel_uc_fini_hw(&gt->uc);
err_uc_init:
	intel_uc_fini(&gt->uc);
err_engines:
	intel_engines_release(gt);
	i915_vm_put(fetch_and_zero(&gt->vm));
err_pm:
	intel_gt_pm_fini(gt);
	intel_gt_fini_scratch(gt);
out_fw:
	if (err)
		intel_gt_set_wedged_on_init(gt);
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
	return err;
}

void intel_gt_driver_remove(struct intel_gt *gt)
{
	__intel_gt_disable(gt);

	intel_migrate_fini(&gt->migrate);
	intel_uc_driver_remove(&gt->uc);

	intel_engines_release(gt);

	intel_gt_flush_buffer_pool(gt);
}

void intel_gt_driver_unregister(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;

	intel_rps_driver_unregister(&gt->rps);

	intel_pxp_fini(&gt->pxp);

	/*
	 * Upon unregistering the device to prevent any new users, cancel
	 * all in-flight requests so that we can quickly unbind the active
	 * resources.
	 */
	intel_gt_set_wedged_on_fini(gt);

	/* Scrub all HW state upon release */
	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		__intel_gt_reset(gt, ALL_ENGINES);
}

void intel_gt_driver_release(struct intel_gt *gt)
{
	struct i915_address_space *vm;

	vm = fetch_and_zero(&gt->vm);
	if (vm) /* FIXME being called twice on error paths :( */
		i915_vm_put(vm);

	intel_wa_list_free(&gt->wa_list);
	intel_gt_pm_fini(gt);
	intel_gt_fini_scratch(gt);
	intel_gt_fini_buffer_pool(gt);
}

void intel_gt_driver_late_release(struct intel_gt *gt)
{
	/* We need to wait for inflight RCU frees to release their grip */
	rcu_barrier();

	intel_uc_driver_late_release(&gt->uc);
	intel_gt_fini_requests(gt);
	intel_gt_fini_reset(gt);
	intel_gt_fini_timelines(gt);
	intel_engines_free(gt);
}

/**
 * intel_gt_reg_needs_read_steering - determine whether a register read
 *     requires explicit steering
 * @gt: GT structure
 * @reg: the register to check steering requirements for
 * @type: type of multicast steering to check
 *
 * Determines whether @reg needs explicit steering of a specific type for
 * reads.
 *
 * Returns false if @reg does not belong to a register range of the given
 * steering type, or if the default (subslice-based) steering IDs are suitable
 * for @type steering too.
 */
static bool intel_gt_reg_needs_read_steering(struct intel_gt *gt,
					     i915_reg_t reg,
					     enum intel_steering_type type)
{
	const u32 offset = i915_mmio_reg_offset(reg);
	const struct intel_mmio_range *entry;

	if (likely(!intel_gt_needs_read_steering(gt, type)))
		return false;

	for (entry = gt->steering_table[type]; entry->end; entry++) {
		if (offset >= entry->start && offset <= entry->end)
			return true;
	}

	return false;
}

/**
 * intel_gt_get_valid_steering - determines valid IDs for a class of MCR steering
 * @gt: GT structure
 * @type: multicast register type
 * @sliceid: Slice ID returned
 * @subsliceid: Subslice ID returned
 *
 * Determines sliceid and subsliceid values that will steer reads
 * of a specific multicast register class to a valid value.
 */
static void intel_gt_get_valid_steering(struct intel_gt *gt,
					enum intel_steering_type type,
					u8 *sliceid, u8 *subsliceid)
{
	switch (type) {
	case L3BANK:
		GEM_DEBUG_WARN_ON(!gt->info.l3bank_mask); /* should be impossible! */

		*sliceid = 0;		/* unused */
		*subsliceid = __ffs(gt->info.l3bank_mask);
		break;
	case MSLICE:
		GEM_DEBUG_WARN_ON(!gt->info.mslice_mask); /* should be impossible! */

		*sliceid = __ffs(gt->info.mslice_mask);
		*subsliceid = 0;	/* unused */
		break;
	case LNCF:
		GEM_DEBUG_WARN_ON(!gt->info.mslice_mask); /* should be impossible! */

		/*
		 * An LNCF is always present if its mslice is present, so we
		 * can safely just steer to LNCF 0 in all cases.
		 */
		*sliceid = __ffs(gt->info.mslice_mask) << 1;
		*subsliceid = 0;	/* unused */
		break;
	default:
		MISSING_CASE(type);
		*sliceid = 0;
		*subsliceid = 0;
	}
}

/**
 * intel_gt_read_register_fw - reads a GT register with support for multicast
 * @gt: GT structure
 * @reg: register to read
 *
 * This function will read a GT register.  If the register is a multicast
 * register, the read will be steered to a valid instance (i.e., one that
 * isn't fused off or powered down by power gating).
 *
 * Returns the value from a valid instance of @reg.
 */
u32 intel_gt_read_register_fw(struct intel_gt *gt, i915_reg_t reg)
{
	int type;
	u8 sliceid, subsliceid;

	for (type = 0; type < NUM_STEERING_TYPES; type++) {
		if (intel_gt_reg_needs_read_steering(gt, reg, type)) {
			intel_gt_get_valid_steering(gt, type, &sliceid,
						    &subsliceid);
			return intel_uncore_read_with_mcr_steering_fw(gt->uncore,
								      reg,
								      sliceid,
								      subsliceid);
		}
	}

	return intel_uncore_read_fw(gt->uncore, reg);
}

void intel_gt_info_print(const struct intel_gt_info *info,
			 struct drm_printer *p)
{
	drm_printf(p, "available engines: %x\n", info->engine_mask);

	intel_sseu_dump(&info->sseu, p);
}

struct reg_and_bit {
	i915_reg_t reg;
	u32 bit;
};

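/*
 * Map an engine to the TLB invalidation register and request bit it uses.
 * On gen8 the video decode engines use consecutive registers (GEN8_M1TCR,
 * GEN8_M2TCR) rather than separate bits within one register.
 */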
static struct reg_and_bit
get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
		const i915_reg_t *regs, const unsigned int num)
{
	const unsigned int class = engine->class;
	struct reg_and_bit rb = { };

	if (drm_WARN_ON_ONCE(&engine->i915->drm,
			     class >= num || !regs[class].reg))
		return rb;

	rb.reg = regs[class];
	if (gen8 && class == VIDEO_DECODE_CLASS)
		rb.reg.reg += 4 * engine->instance; /* GEN8_M2TCR */
	else
		rb.bit = engine->instance;

	rb.bit = BIT(rb.bit);

	return rb;
}

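/*
 * Invalidate the TLBs across all engines by writing each engine's
 * invalidation request and polling for the hardware to acknowledge
 * completion. There is nothing to do before gen8, where these registers
 * do not exist.
 */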
void intel_gt_invalidate_tlbs(struct intel_gt *gt)
{
	static const i915_reg_t gen8_regs[] = {
		[RENDER_CLASS]			= GEN8_RTCR,
		[VIDEO_DECODE_CLASS]		= GEN8_M1TCR, /* , GEN8_M2TCR */
		[VIDEO_ENHANCEMENT_CLASS]	= GEN8_VTCR,
		[COPY_ENGINE_CLASS]		= GEN8_BTCR,
	};
	static const i915_reg_t gen12_regs[] = {
		[RENDER_CLASS]			= GEN12_GFX_TLB_INV_CR,
		[VIDEO_DECODE_CLASS]		= GEN12_VD_TLB_INV_CR,
		[VIDEO_ENHANCEMENT_CLASS]	= GEN12_VE_TLB_INV_CR,
		[COPY_ENGINE_CLASS]		= GEN12_BLT_TLB_INV_CR,
	};
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	const i915_reg_t *regs;
	unsigned int num = 0;

	if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
		return;

	if (GRAPHICS_VER(i915) == 12) {
		regs = gen12_regs;
		num = ARRAY_SIZE(gen12_regs);
	} else if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) <= 11) {
		regs = gen8_regs;
		num = ARRAY_SIZE(gen8_regs);
	} else if (GRAPHICS_VER(i915) < 8) {
		return;
	}

	if (drm_WARN_ONCE(&i915->drm, !num,
			  "Platform does not implement TLB invalidation!"))
		return;

	GEM_TRACE("\n");

	assert_rpm_wakelock_held(&i915->runtime_pm);

	mutex_lock(&gt->tlb_invalidate_lock);
	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	for_each_engine(engine, gt, id) {
		/*
		 * HW architecture suggest typical invalidation time at 40us,
		 * with pessimistic cases up to 100us and a recommendation to
		 * cap at 1ms. We go a bit higher just in case.
		 */
		const unsigned int timeout_us = 100;
		const unsigned int timeout_ms = 4;
		struct reg_and_bit rb;

		rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
		if (!i915_mmio_reg_offset(rb.reg))
			continue;

		intel_uncore_write_fw(uncore, rb.reg, rb.bit);
		if (__intel_wait_for_register_fw(uncore,
						 rb.reg, rb.bit, 0,
						 timeout_us, timeout_ms,
						 NULL))
			drm_err_ratelimited(&gt->i915->drm,
					    "%s TLB invalidation did not complete in %ums!\n",
					    engine->name, timeout_ms);
	}

	/*
	 * Use delayed put since a) we mostly expect a flurry of TLB
	 * invalidations so it is good to avoid paying the forcewake cost and
	 * b) it works around a bug in Icelake which cannot cope with too rapid
	 * transitions.
	 */
	intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
	mutex_unlock(&gt->tlb_invalidate_lock);
}