// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/sched/clock.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_breadcrumbs.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_gt_regs.h"
#include "intel_uncore.h"
#include "intel_rps.h"
#include "pxp/intel_pxp_irq.h"

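/* Forward a GuC-to-host event to the GuC handler, if GuC interrupts are enabled. */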
static void guc_irq_handler(struct intel_guc *guc, u16 iir)
{
	if (unlikely(!guc->interrupts.enabled))
		return;

	if (iir & GUC_INTR_GUC2HOST)
		intel_guc_to_host_event_handler(guc);
}

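/*
 * Read back the identity dword for one pending bank/bit: select the bit in
 * the IIR selector register, then spin until the identity register reports
 * valid data (we allow ~100us). Returns 0 if the data never became valid.
 */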
static u32
gen11_gt_engine_identity(struct intel_gt *gt,
			 const unsigned int bank, const unsigned int bit)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 timeout_ts;
	u32 ident;

	lockdep_assert_held(gt->irq_lock);

	raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));

	/*
	 * NB: Specs do not specify how long to spin wait,
	 * so we do ~100us as an educated guess.
	 */
	timeout_ts = (local_clock() >> 10) + 100;
	do {
		ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
	} while (!(ident & GEN11_INTR_DATA_VALID) &&
		 !time_after32(local_clock() >> 10, timeout_ts));

	if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
		drm_err(&gt->i915->drm,
			"INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
			bank, bit, ident);
		return 0;
	}

	raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
		      GEN11_INTR_DATA_VALID);

	return ident;
}

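/*
 * Route OTHER_CLASS interrupts (GuC, GT PM/RPS, KCR/PXP, GSC) to their
 * handlers, using the standalone media GT's instance where one exists.
 */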
static void
gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
			const u16 iir)
{
	struct intel_gt *media_gt = gt->i915->media_gt;

	if (instance == OTHER_GUC_INSTANCE)
		return guc_irq_handler(&gt->uc.guc, iir);

	if (instance == OTHER_MEDIA_GUC_INSTANCE && media_gt)
		return guc_irq_handler(&media_gt->uc.guc, iir);

	if (instance == OTHER_GTPM_INSTANCE)
		return gen11_rps_irq_handler(&gt->rps, iir);

	if (instance == OTHER_MEDIA_GTPM_INSTANCE && media_gt)
		return gen11_rps_irq_handler(&media_gt->rps, iir);

	if (instance == OTHER_KCR_INSTANCE)
		return intel_pxp_irq_handler(gt->i915->pxp, iir);

	if (instance == OTHER_GSC_INSTANCE)
		return intel_gsc_irq_handler(gt, iir);

	WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
		  instance, iir);
}

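/*
 * Map an engine class/instance reported by the interrupt to the GT that
 * actually owns it: video and GSC engines may live on the media GT.
 */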
static struct intel_gt *pick_gt(struct intel_gt *gt, u8 class, u8 instance)
{
	struct intel_gt *media_gt = gt->i915->media_gt;

	/* we expect the non-media gt to be passed in */
	GEM_BUG_ON(gt == media_gt);

	if (!media_gt)
		return gt;

	switch (class) {
	case VIDEO_DECODE_CLASS:
	case VIDEO_ENHANCEMENT_CLASS:
		return media_gt;
	case OTHER_CLASS:
		if (instance == OTHER_GSC_INSTANCE && HAS_ENGINE(media_gt, GSC0))
			return media_gt;
		fallthrough;
	default:
		return gt;
	}
}

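/*
 * Decode an identity dword into class/instance/intr bits and dispatch to the
 * matching engine, or to the OTHER_CLASS handler for non-engine sources.
 */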
static void
gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity)
{
	const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
	const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
	const u16 intr = GEN11_INTR_ENGINE_INTR(identity);

	if (unlikely(!intr))
		return;

	/*
	 * Platforms with standalone media have the media and GSC engines in
	 * another GT.
	 */
	gt = pick_gt(gt, class, instance);

	if (class <= MAX_ENGINE_CLASS && instance <= MAX_ENGINE_INSTANCE) {
		struct intel_engine_cs *engine = gt->engine_class[class][instance];

		if (engine)
			return intel_engine_cs_irq(engine, intr);
	}

	if (class == OTHER_CLASS)
		return gen11_other_irq_handler(gt, instance, intr);

	WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
		  class, instance, intr);
}

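/*
 * Service every pending bit in one GT interrupt bank, then clear the bank's
 * GT_INTR_DW to release it.
 */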
static void
gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
{
	void __iomem * const regs = gt->uncore->regs;
	unsigned long intr_dw;
	unsigned int bit;

	lockdep_assert_held(gt->irq_lock);

	intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));

	for_each_set_bit(bit, &intr_dw, 32) {
		const u32 ident = gen11_gt_engine_identity(gt, bank, bit);

		gen11_gt_identity_handler(gt, ident);
	}

	/* The clear must come after the shared IIR has been serviced for the engine */
	raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
}

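/* Top-level gen11+ GT interrupt handler: walk both banks under gt->irq_lock. */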
void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
{
	unsigned int bank;

	spin_lock(gt->irq_lock);

	for (bank = 0; bank < 2; bank++) {
		if (master_ctl & GEN11_GT_DW_IRQ(bank))
			gen11_gt_bank_handler(gt, bank);
	}

	spin_unlock(gt->irq_lock);
}

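/*
 * Check whether a single bank/bit is still pending and, if so, service and
 * clear it. Returns true if the bit was set.
 */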
bool gen11_gt_reset_one_iir(struct intel_gt *gt,
			    const unsigned int bank, const unsigned int bit)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 dw;

	lockdep_assert_held(gt->irq_lock);

	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
	if (dw & BIT(bit)) {
		/*
		 * According to the BSpec, DW_IIR bits cannot be cleared without
		 * first servicing the Selector & Shared IIR registers.
		 */
		gen11_gt_engine_identity(gt, bank, bit);

		/*
		 * We locked GT INT DW by reading it. If we want to (try
		 * to) recover from this successfully, we need to clear
		 * our bit, otherwise we are locking the register for
		 * everybody.
		 */
		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));

		return true;
	}

	return false;
}

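/* Disable and mask every gen11+ GT interrupt source present on this GT. */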
void gen11_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	/* Disable RCS, BCS, VCS and VECS class engines. */
	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE,	  0);
	if (CCS_MASK(gt))
		intel_uncore_write(uncore, GEN12_CCS_RSVD_INTR_ENABLE, 0);
	if (HAS_HECI_GSC(gt->i915) || HAS_ENGINE(gt, GSC0))
		intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_ENABLE, 0);

	/* Restore the irq masks on RCS, BCS, VCS and VECS engines. */
	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK,	~0);
	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK,	~0);
	if (HAS_ENGINE(gt, BCS1) || HAS_ENGINE(gt, BCS2))
		intel_uncore_write(uncore, XEHPC_BCS1_BCS2_INTR_MASK, ~0);
	if (HAS_ENGINE(gt, BCS3) || HAS_ENGINE(gt, BCS4))
		intel_uncore_write(uncore, XEHPC_BCS3_BCS4_INTR_MASK, ~0);
	if (HAS_ENGINE(gt, BCS5) || HAS_ENGINE(gt, BCS6))
		intel_uncore_write(uncore, XEHPC_BCS5_BCS6_INTR_MASK, ~0);
	if (HAS_ENGINE(gt, BCS7) || HAS_ENGINE(gt, BCS8))
		intel_uncore_write(uncore, XEHPC_BCS7_BCS8_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK,	~0);
	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK,	~0);
	if (HAS_ENGINE(gt, VCS4) || HAS_ENGINE(gt, VCS5))
		intel_uncore_write(uncore, GEN12_VCS4_VCS5_INTR_MASK,   ~0);
	if (HAS_ENGINE(gt, VCS6) || HAS_ENGINE(gt, VCS7))
		intel_uncore_write(uncore, GEN12_VCS6_VCS7_INTR_MASK,   ~0);
	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK,	~0);
	if (HAS_ENGINE(gt, VECS2) || HAS_ENGINE(gt, VECS3))
		intel_uncore_write(uncore, GEN12_VECS2_VECS3_INTR_MASK, ~0);
	if (HAS_ENGINE(gt, CCS0) || HAS_ENGINE(gt, CCS1))
		intel_uncore_write(uncore, GEN12_CCS0_CCS1_INTR_MASK, ~0);
	if (HAS_ENGINE(gt, CCS2) || HAS_ENGINE(gt, CCS3))
		intel_uncore_write(uncore, GEN12_CCS2_CCS3_INTR_MASK, ~0);
	if (HAS_HECI_GSC(gt->i915) || HAS_ENGINE(gt, GSC0))
		intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_MASK, ~0);

	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK,  ~0);

	intel_uncore_write(uncore, GEN11_CRYPTO_RSVD_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_CRYPTO_RSVD_INTR_MASK,  ~0);
}

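/*
 * Enable and unmask the engine, GSC and GuC interrupts this GT supports;
 * RPS interrupts are left for RPS code to toggle on demand.
 */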
void gen11_gt_irq_postinstall(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 irqs = GT_RENDER_USER_INTERRUPT;
	u32 guc_mask = intel_uc_wants_guc(&gt->uc) ? GUC_INTR_GUC2HOST : 0;
	u32 gsc_mask = 0;
	u32 dmask;
	u32 smask;

	if (!intel_uc_wants_guc_submission(&gt->uc))
		irqs |= GT_CS_MASTER_ERROR_INTERRUPT |
			GT_CONTEXT_SWITCH_INTERRUPT |
			GT_WAIT_SEMAPHORE_INTERRUPT;

	dmask = irqs << 16 | irqs;
	smask = irqs << 16;

	if (HAS_ENGINE(gt, GSC0))
		gsc_mask = irqs;
	else if (HAS_HECI_GSC(gt->i915))
		gsc_mask = GSC_IRQ_INTF(0) | GSC_IRQ_INTF(1);

	BUILD_BUG_ON(irqs & 0xffff0000);

	/* Enable RCS, BCS, VCS and VECS class interrupts. */
	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask);
	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask);
	if (CCS_MASK(gt))
		intel_uncore_write(uncore, GEN12_CCS_RSVD_INTR_ENABLE, smask);
	if (gsc_mask)
		intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_ENABLE, gsc_mask);

	/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
	if (HAS_ENGINE(gt, BCS1) || HAS_ENGINE(gt, BCS2))
		intel_uncore_write(uncore, XEHPC_BCS1_BCS2_INTR_MASK, ~dmask);
	if (HAS_ENGINE(gt, BCS3) || HAS_ENGINE(gt, BCS4))
		intel_uncore_write(uncore, XEHPC_BCS3_BCS4_INTR_MASK, ~dmask);
	if (HAS_ENGINE(gt, BCS5) || HAS_ENGINE(gt, BCS6))
		intel_uncore_write(uncore, XEHPC_BCS5_BCS6_INTR_MASK, ~dmask);
	if (HAS_ENGINE(gt, BCS7) || HAS_ENGINE(gt, BCS8))
		intel_uncore_write(uncore, XEHPC_BCS7_BCS8_INTR_MASK, ~dmask);
	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
	if (HAS_ENGINE(gt, VCS4) || HAS_ENGINE(gt, VCS5))
		intel_uncore_write(uncore, GEN12_VCS4_VCS5_INTR_MASK, ~dmask);
	if (HAS_ENGINE(gt, VCS6) || HAS_ENGINE(gt, VCS7))
		intel_uncore_write(uncore, GEN12_VCS6_VCS7_INTR_MASK, ~dmask);
	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask);
	if (HAS_ENGINE(gt, VECS2) || HAS_ENGINE(gt, VECS3))
		intel_uncore_write(uncore, GEN12_VECS2_VECS3_INTR_MASK, ~dmask);
	if (HAS_ENGINE(gt, CCS0) || HAS_ENGINE(gt, CCS1))
		intel_uncore_write(uncore, GEN12_CCS0_CCS1_INTR_MASK, ~dmask);
	if (HAS_ENGINE(gt, CCS2) || HAS_ENGINE(gt, CCS3))
		intel_uncore_write(uncore, GEN12_CCS2_CCS3_INTR_MASK, ~dmask);
	if (gsc_mask)
		intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_MASK, ~gsc_mask);

	if (guc_mask) {
		/* the enable bit is common for both GTs but the masks are separate */
		u32 mask = gt->type == GT_MEDIA ?
			REG_FIELD_PREP(ENGINE0_MASK, guc_mask) :
			REG_FIELD_PREP(ENGINE1_MASK, guc_mask);

		intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE,
				   REG_FIELD_PREP(ENGINE1_MASK, guc_mask));

		/* we might not be the first GT to write this reg */
		intel_uncore_rmw(uncore, MTL_GUC_MGUC_INTR_MASK, mask, 0);
	}

	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled.
	 */
	gt->pm_ier = 0x0;
	gt->pm_imr = ~gt->pm_ier;
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
}

void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0],
				    gt_iir);

	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0],
				    gt_iir);
}

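/*
 * On an L3 parity interrupt, mask further parity interrupts, note which
 * slice(s) were hit and defer the handling to the l3_parity error work.
 */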
static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
{
	if (!HAS_L3_DPF(gt->i915))
		return;

	spin_lock(gt->irq_lock);
	gen5_gt_disable_irq(gt, GT_PARITY_ERROR(gt->i915));
	spin_unlock(gt->irq_lock);

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		gt->i915->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		gt->i915->l3_parity.which_slice |= 1 << 0;

	schedule_work(&gt->i915->l3_parity.error_work);
}

void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0],
				    gt_iir);

	if (gt_iir & GT_BSD_USER_INTERRUPT)
		intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0],
				    gt_iir >> 12);

	if (gt_iir & GT_BLT_USER_INTERRUPT)
		intel_engine_cs_irq(gt->engine_class[COPY_ENGINE_CLASS][0],
				    gt_iir >> 22);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_CS_MASTER_ERROR_INTERRUPT))
		drm_dbg(&gt->i915->drm, "Command parser error, gt_iir 0x%08x\n",
			gt_iir);

	if (gt_iir & GT_PARITY_ERROR(gt->i915))
		gen7_parity_error_irq_handler(gt, gt_iir);
}

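/*
 * gen8-gen10 GT interrupt handler: each GT IIR packs two sources 16 bits
 * apart, so the per-engine shifts pick out the relevant half before the
 * IIR is acked.
 */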
void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 iir;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(0));
		if (likely(iir)) {
			intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0],
					    iir >> GEN8_RCS_IRQ_SHIFT);
			intel_engine_cs_irq(gt->engine_class[COPY_ENGINE_CLASS][0],
					    iir >> GEN8_BCS_IRQ_SHIFT);
			raw_reg_write(regs, GEN8_GT_IIR(0), iir);
		}
	}

	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(1));
		if (likely(iir)) {
			intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0],
					    iir >> GEN8_VCS0_IRQ_SHIFT);
			intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][1],
					    iir >> GEN8_VCS1_IRQ_SHIFT);
			raw_reg_write(regs, GEN8_GT_IIR(1), iir);
		}
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(3));
		if (likely(iir)) {
			intel_engine_cs_irq(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
					    iir >> GEN8_VECS_IRQ_SHIFT);
			raw_reg_write(regs, GEN8_GT_IIR(3), iir);
		}
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(2));
		if (likely(iir)) {
			gen6_rps_irq_handler(&gt->rps, iir);
			guc_irq_handler(&gt->uc.guc, iir >> 16);
			raw_reg_write(regs, GEN8_GT_IIR(2), iir);
		}
	}
}

void gen8_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	GEN8_IRQ_RESET_NDX(uncore, GT, 0);
	GEN8_IRQ_RESET_NDX(uncore, GT, 1);
	GEN8_IRQ_RESET_NDX(uncore, GT, 2);
	GEN8_IRQ_RESET_NDX(uncore, GT, 3);
}

void gen8_gt_irq_postinstall(struct intel_gt *gt)
{
	/* These are interrupts we'll toggle with the ring mask register */
	const u32 irqs =
		GT_CS_MASTER_ERROR_INTERRUPT |
		GT_RENDER_USER_INTERRUPT |
		GT_CONTEXT_SWITCH_INTERRUPT |
		GT_WAIT_SEMAPHORE_INTERRUPT;
	const u32 gt_interrupts[] = {
		irqs << GEN8_RCS_IRQ_SHIFT | irqs << GEN8_BCS_IRQ_SHIFT,
		irqs << GEN8_VCS0_IRQ_SHIFT | irqs << GEN8_VCS1_IRQ_SHIFT,
		0,
		irqs << GEN8_VECS_IRQ_SHIFT,
	};
	struct intel_uncore *uncore = gt->uncore;

	gt->pm_ier = 0x0;
	gt->pm_imr = ~gt->pm_ier;
	GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled. Same will be the case for GuC interrupts.
	 */
	GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier);
	GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

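/*
 * Update the cached GTIMR value: bits in interrupt_mask are rewritten, and
 * only those also set in enabled_irq_mask end up unmasked.
 */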
static void gen5_gt_update_irq(struct intel_gt *gt,
			       u32 interrupt_mask,
			       u32 enabled_irq_mask)
{
	lockdep_assert_held(gt->irq_lock);

	GEM_BUG_ON(enabled_irq_mask & ~interrupt_mask);

	gt->gt_imr &= ~interrupt_mask;
	gt->gt_imr |= (~enabled_irq_mask & interrupt_mask);
	intel_uncore_write(gt->uncore, GTIMR, gt->gt_imr);
}

void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask)
{
	gen5_gt_update_irq(gt, mask, mask);
	intel_uncore_posting_read_fw(gt->uncore, GTIMR);
}

void gen5_gt_disable_irq(struct intel_gt *gt, u32 mask)
{
	gen5_gt_update_irq(gt, mask, 0);
}

void gen5_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	GEN3_IRQ_RESET(uncore, GT);
	if (GRAPHICS_VER(gt->i915) >= 6)
		GEN3_IRQ_RESET(uncore, GEN6_PM);
}

void gen5_gt_irq_postinstall(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 pm_irqs = 0;
	u32 gt_irqs = 0;

	gt->gt_imr = ~0;
	if (HAS_L3_DPF(gt->i915)) {
		/* L3 parity interrupt is always unmasked. */
		gt->gt_imr = ~GT_PARITY_ERROR(gt->i915);
		gt_irqs |= GT_PARITY_ERROR(gt->i915);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (GRAPHICS_VER(gt->i915) == 5)
		gt_irqs |= ILK_BSD_USER_INTERRUPT;
	else
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GT, gt->gt_imr, gt_irqs);

	if (GRAPHICS_VER(gt->i915) >= 6) {
		/*
		 * RPS interrupts will get enabled/disabled on demand when RPS
		 * itself is enabled/disabled.
		 */
		if (HAS_ENGINE(gt, VECS0)) {
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
			gt->pm_ier |= PM_VEBOX_USER_INTERRUPT;
		}

		gt->pm_imr = 0xffffffff;
		GEN3_IRQ_INIT(uncore, GEN6_PM, gt->pm_imr, pm_irqs);
	}
}