/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/circ_buf.h>
#include <linux/cpuidle.h>
#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>
#include <drm/drm_irq.h>
#include <drm/i915_drm.h>

#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"

#include "gt/intel_gt.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_pm.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
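
/*
 * Per-platform hotplug pin tables: each array below maps an hpd_pin to
 * the hotplug interrupt bit(s) used by that platform's hotplug registers.
 */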

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
	[HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
	[HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
	[HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
};

static const u32 hpd_gen12[HPD_NUM_PINS] = {
	[HPD_PORT_D] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
	[HPD_PORT_E] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
	[HPD_PORT_F] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
	[HPD_PORT_G] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG,
	[HPD_PORT_H] = GEN12_TC5_HOTPLUG | GEN12_TBT5_HOTPLUG,
	[HPD_PORT_I] = GEN12_TC6_HOTPLUG | GEN12_TBT6_HOTPLUG
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
	[HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
	[HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP,
	[HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP,
	[HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP,
	[HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
};

static const u32 hpd_mcc[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
	[HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
	[HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP
};

static const u32 hpd_tgp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
	[HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
	[HPD_PORT_C] = SDE_DDIC_HOTPLUG_TGP,
	[HPD_PORT_D] = SDE_TC1_HOTPLUG_ICP,
	[HPD_PORT_E] = SDE_TC2_HOTPLUG_ICP,
	[HPD_PORT_F] = SDE_TC3_HOTPLUG_ICP,
	[HPD_PORT_G] = SDE_TC4_HOTPLUG_ICP,
	[HPD_PORT_H] = SDE_TC5_HOTPLUG_TGP,
	[HPD_PORT_I] = SDE_TC6_HOTPLUG_TGP,
};

static void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
			   i915_reg_t iir, i915_reg_t ier)
{
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	intel_uncore_write(uncore, ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}

static void gen2_irq_reset(struct intel_uncore *uncore)
{
	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IMR);

	intel_uncore_write16(uncore, GEN2_IER, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}
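
/*
 * The GEN8_IRQ_RESET_NDX() and GEN8_IRQ_INIT_NDX() macros below copy
 * 'which' into a local so the macro argument is evaluated only once
 * before being pasted into the per-index register names.
 */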

#define GEN8_IRQ_RESET_NDX(uncore, type, which) \
({ \
	unsigned int which_ = which; \
	gen3_irq_reset((uncore), GEN8_##type##_IMR(which_), \
		       GEN8_##type##_IIR(which_), GEN8_##type##_IER(which_)); \
})

#define GEN3_IRQ_RESET(uncore, type) \
	gen3_irq_reset((uncore), type##IMR, type##IIR, type##IER)

#define GEN2_IRQ_RESET(uncore) \
	gen2_irq_reset(uncore)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}

static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(GEN2_IIR), val);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

static void gen3_irq_init(struct intel_uncore *uncore,
			  i915_reg_t imr, u32 imr_val,
			  i915_reg_t ier, u32 ier_val,
			  i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}

static void gen2_irq_init(struct intel_uncore *uncore,
			  u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}

#define GEN8_IRQ_INIT_NDX(uncore, type, which, imr_val, ier_val) \
({ \
	unsigned int which_ = which; \
	gen3_irq_init((uncore), \
		      GEN8_##type##_IMR(which_), imr_val, \
		      GEN8_##type##_IER(which_), ier_val, \
		      GEN8_##type##_IIR(which_)); \
})

#define GEN3_IRQ_INIT(uncore, type, imr_val, ier_val) \
	gen3_irq_init((uncore), \
		      type##IMR, imr_val, \
		      type##IER, ier_val, \
		      type##IIR)

#define GEN2_IRQ_INIT(uncore, imr_val, ier_val) \
	gen2_irq_init((uncore), imr_val, ier_val)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void guc_irq_handler(struct intel_guc *guc, u16 guc_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     u32 mask,
				     u32 bits)
{
	u32 val;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid that read-modify-write cycles
 * interfere, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

332
static u32
333
gen11_gt_engine_identity(struct intel_gt *gt,
334 335
			 const unsigned int bank, const unsigned int bit);

336
static bool gen11_reset_one_iir(struct intel_gt *gt,
337 338
				const unsigned int bank,
				const unsigned int bit)
339
{
340
	void __iomem * const regs = gt->uncore->regs;
341 342
	u32 dw;

343
	lockdep_assert_held(&gt->i915->irq_lock);
344 345 346 347 348 349 350

	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
	if (dw & BIT(bit)) {
		/*
		 * According to the BSpec, DW_IIR bits cannot be cleared without
		 * first servicing the Selector & Shared IIR registers.
		 */
351
		gen11_gt_engine_identity(gt, bank, bit);
352 353 354 355 356 357 358 359 360 361 362 363 364 365 366

		/*
		 * We locked GT INT DW by reading it. If we want to (try
		 * to) recover from this successfully, we need to clear
		 * our bit, otherwise we are locking the register for
		 * everybody.
		 */
		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));

		return true;
	}

	return false;
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
373
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
374 375
			    u32 interrupt_mask,
			    u32 enabled_irq_mask)
376
{
377
	u32 new_val;
378

379
	lockdep_assert_held(&dev_priv->irq_lock);
380

381 382
	WARN_ON(enabled_irq_mask & ~interrupt_mask);

383
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
384 385
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
392
		I915_WRITE(DEIMR, dev_priv->irq_mask);
393
		POSTING_READ(DEIMR);
394 395 396
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
404 405
			      u32 interrupt_mask,
			      u32 enabled_irq_mask)
{
407
	lockdep_assert_held(&dev_priv->irq_lock);

409 410
	WARN_ON(enabled_irq_mask & ~interrupt_mask);

411
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
412 413
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}

419
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
422
	intel_uncore_posting_read_fw(&dev_priv->uncore, GTIMR);
}

425
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

430
static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
431
{
432 433
	WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);

434
	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
435 436
}

437
static void write_pm_imr(struct intel_gt *gt)
438
{
439 440 441
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	u32 mask = gt->pm_imr;
442 443
	i915_reg_t reg;

444
	if (INTEL_GEN(i915) >= 11) {
445 446 447
		reg = GEN11_GPM_WGBOXPERF_INTR_MASK;
		/* pm is in upper half */
		mask = mask << 16;
448
	} else if (INTEL_GEN(i915) >= 8) {
449 450 451 452 453
		reg = GEN8_GT_IMR(2);
	} else {
		reg = GEN6_PMIMR;
	}

454 455
	intel_uncore_write(uncore, reg, mask);
	intel_uncore_posting_read(uncore, reg);
456 457
}

458
static void write_pm_ier(struct intel_gt *gt)
459
{
460 461 462
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	u32 mask = gt->pm_ier;
463 464
	i915_reg_t reg;

465
	if (INTEL_GEN(i915) >= 11) {
466 467 468
		reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE;
		/* pm is in upper half */
		mask = mask << 16;
469
	} else if (INTEL_GEN(i915) >= 8) {
470 471 472 473 474
		reg = GEN8_GT_IER(2);
	} else {
		reg = GEN6_PMIER;
	}

475
	intel_uncore_write(uncore, reg, mask);
476 477
}

/**
479
 * snb_update_pm_irq - update GEN6_PMIMR
480
 * @gt: gt for the interrupts
481 482 483
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
484
static void snb_update_pm_irq(struct intel_gt *gt,
485 486
			      u32 interrupt_mask,
			      u32 enabled_irq_mask)
{
488
	u32 new_val;

490 491
	WARN_ON(enabled_irq_mask & ~interrupt_mask);

492
	lockdep_assert_held(&gt->i915->irq_lock);

494
	new_val = gt->pm_imr;
495 496 497
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

498 499 500
	if (new_val != gt->pm_imr) {
		gt->pm_imr = new_val;
		write_pm_imr(gt);
501
	}
}

504
void gen6_unmask_pm_irq(struct intel_gt *gt, u32 mask)
{
506
	if (WARN_ON(!intel_irqs_enabled(gt->i915)))
507 508
		return;

509
	snb_update_pm_irq(gt, mask, mask);
}

512
static void __gen6_mask_pm_irq(struct intel_gt *gt, u32 mask)
{
514
	snb_update_pm_irq(gt, mask, 0);
}

517
void gen6_mask_pm_irq(struct intel_gt *gt, u32 mask)
518
{
519
	if (WARN_ON(!intel_irqs_enabled(gt->i915)))
520 521
		return;

522
	__gen6_mask_pm_irq(gt, mask);
523 524
}

525
static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
527
	i915_reg_t reg = gen6_pm_iir(dev_priv);

529
	lockdep_assert_held(&dev_priv->irq_lock);
530 531 532

	I915_WRITE(reg, reset_mask);
	I915_WRITE(reg, reset_mask);
	POSTING_READ(reg);
534 535
}

536
static void gen6_enable_pm_irq(struct intel_gt *gt, u32 enable_mask)
537
{
538
	lockdep_assert_held(&gt->i915->irq_lock);
539

540 541 542
	gt->pm_ier |= enable_mask;
	write_pm_ier(gt);
	gen6_unmask_pm_irq(gt, enable_mask);
543 544 545
	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}

546
static void gen6_disable_pm_irq(struct intel_gt *gt, u32 disable_mask)
547
{
548
	lockdep_assert_held(&gt->i915->irq_lock);
549

550 551 552
	gt->pm_ier &= ~disable_mask;
	__gen6_mask_pm_irq(gt, disable_mask);
	write_pm_ier(gt);
553 554 555
	/* though a barrier is missing here, we don't really need one */
}

556 557 558 559
void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);

560
	while (gen11_reset_one_iir(&dev_priv->gt, 0, GEN11_GTPM))
561
		;
562 563 564 565 566 567

	dev_priv->gt_pm.rps.pm_iir = 0;

	spin_unlock_irq(&dev_priv->irq_lock);
}

568 569 570
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
571
	gen6_reset_pm_iir(dev_priv, GEN6_PM_RPS_EVENTS);
572
	dev_priv->gt_pm.rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

576
void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
577
{
578
	struct intel_gt *gt = &dev_priv->gt;
579 580 581
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (READ_ONCE(rps->interrupts_enabled))
582 583
		return;

584
	spin_lock_irq(&dev_priv->irq_lock);
585
	WARN_ON_ONCE(rps->pm_iir);
586

587
	if (INTEL_GEN(dev_priv) >= 11)
588
		WARN_ON_ONCE(gen11_reset_one_iir(gt, 0, GEN11_GTPM));
589 590
	else
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
591

592
	rps->interrupts_enabled = true;
593
	gen6_enable_pm_irq(gt, dev_priv->pm_rps_events);
594

595 596 597
	spin_unlock_irq(&dev_priv->irq_lock);
}

598
void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
599
{
600 601 602
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (!READ_ONCE(rps->interrupts_enabled))
603 604
		return;

	spin_lock_irq(&dev_priv->irq_lock);
606
	rps->interrupts_enabled = false;
607

608
	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
609

610
	gen6_disable_pm_irq(&dev_priv->gt, GEN6_PM_RPS_EVENTS);
611 612

	spin_unlock_irq(&dev_priv->irq_lock);
613
	intel_synchronize_irq(dev_priv);
614 615

	/* Now that we will not be generating any more work, flush any
616
	 * outstanding tasks. As we are called on the RPS idle path,
617 618 619
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
620
	cancel_work_sync(&rps->work);
621 622 623 624
	if (INTEL_GEN(dev_priv) >= 11)
		gen11_reset_rps_interrupts(dev_priv);
	else
		gen6_reset_rps_interrupts(dev_priv);
625 626
}

627
void gen9_reset_guc_interrupts(struct intel_guc *guc)
628
{
629 630
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_private *i915 = gt->i915;
631

632
	assert_rpm_wakelock_held(&i915->runtime_pm);
633

634 635 636
	spin_lock_irq(&i915->irq_lock);
	gen6_reset_pm_iir(i915, gt->pm_guc_events);
	spin_unlock_irq(&i915->irq_lock);
637 638
}

639
void gen9_enable_guc_interrupts(struct intel_guc *guc)
640
{
641 642
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_private *i915 = gt->i915;
643

644
	assert_rpm_wakelock_held(&i915->runtime_pm);
645

646
	spin_lock_irq(&i915->irq_lock);
647
	if (!guc->interrupts.enabled) {
648 649
		WARN_ON_ONCE(intel_uncore_read(gt->uncore, gen6_pm_iir(i915)) &
			     gt->pm_guc_events);
650
		guc->interrupts.enabled = true;
651
		gen6_enable_pm_irq(gt, gt->pm_guc_events);
652
	}
653
	spin_unlock_irq(&i915->irq_lock);
654 655
}

656
void gen9_disable_guc_interrupts(struct intel_guc *guc)
657
{
658 659
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_private *i915 = gt->i915;
660

661
	assert_rpm_wakelock_held(&i915->runtime_pm);
662

663
	spin_lock_irq(&i915->irq_lock);
664
	guc->interrupts.enabled = false;
665

666
	gen6_disable_pm_irq(gt, gt->pm_guc_events);
667

668 669
	spin_unlock_irq(&i915->irq_lock);
	intel_synchronize_irq(i915);
670

671
	gen9_reset_guc_interrupts(guc);
672 673
}

674
void gen11_reset_guc_interrupts(struct intel_guc *guc)
675
{
676 677
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_private *i915 = gt->i915;
678

679
	spin_lock_irq(&i915->irq_lock);
680
	gen11_reset_one_iir(gt, 0, GEN11_GUC);
681 682 683
	spin_unlock_irq(&i915->irq_lock);
}

684
void gen11_enable_guc_interrupts(struct intel_guc *guc)
685
{
686
	struct intel_gt *gt = guc_to_gt(guc);
687

688
	spin_lock_irq(&gt->i915->irq_lock);
689
	if (!guc->interrupts.enabled) {
690
		u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
691

692 693 694
		WARN_ON_ONCE(gen11_reset_one_iir(gt, 0, GEN11_GUC));
		intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, events);
		intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~events);
695
		guc->interrupts.enabled = true;
696
	}
697
	spin_unlock_irq(&gt->i915->irq_lock);
698 699
}

700
void gen11_disable_guc_interrupts(struct intel_guc *guc)
701
{
702 703
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_private *i915 = gt->i915;
704

705
	spin_lock_irq(&i915->irq_lock);
706
	guc->interrupts.enabled = false;
707

708 709
	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
710

711 712
	spin_unlock_irq(&i915->irq_lock);
	intel_synchronize_irq(i915);
713

714
	gen11_reset_guc_interrupts(guc);
715 716
}

717
/**
718 719 720 721 722
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
723
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
724 725
				u32 interrupt_mask,
				u32 enabled_irq_mask)
726
{
727 728
	u32 new_val;
	u32 old_val;
729

730
	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
758 759
			 u32 interrupt_mask,
			 u32 enabled_irq_mask)
760
{
761
	u32 new_val;
762

763
	lockdep_assert_held(&dev_priv->irq_lock);
764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

781 782 783 784 785 786
/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
787
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
788 789
				  u32 interrupt_mask,
				  u32 enabled_irq_mask)
790
{
791
	u32 sdeimr = I915_READ(SDEIMR);
792 793 794
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

795 796
	WARN_ON(enabled_irq_mask & ~interrupt_mask);

797
	lockdep_assert_held(&dev_priv->irq_lock);
798

799
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
800 801
		return;

802 803 804
	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
805

806 807
u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
808
{
809 810
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;
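	/*
	 * The enable bits live in the upper half of PIPESTAT, mirroring
	 * the status bits in the lower half, so the default enable mask
	 * is simply the status mask shifted up by 16.
	 */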

812
	lockdep_assert_held(&dev_priv->irq_lock);
813

814 815
	if (INTEL_GEN(dev_priv) < 5)
		goto out;
816 817

	/*
818 819
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
820 821 822
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		  status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		  pipe_name(pipe), enable_mask, status_mask);

844 845 846
	return enable_mask;
}

847 848
void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
849
{
850
	i915_reg_t reg = PIPESTAT(pipe);
851 852
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
868 869
}

870 871
void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
872
{
873
	i915_reg_t reg = PIPESTAT(pipe);
874 875
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
891 892
}

static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle)
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

901
/**
902
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
903
 * @dev_priv: i915 device private
904
 */
905
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
906
{
907
	if (!i915_has_asle(dev_priv))
908 909
		return;

910
	spin_lock_irq(&dev_priv->irq_lock);
911

912
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
913
	if (INTEL_GEN(dev_priv) >= 4)
914
		i915_enable_pipestat(dev_priv, PIPE_A,
915
				     PIPE_LEGACY_BLC_EVENT_STATUS);
916

917
	spin_unlock_irq(&dev_priv->irq_lock);
918 919
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

970 971 972
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
973
u32 i915_get_vblank_counter(struct drm_crtc *crtc)
974
{
975 976
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
977
	const struct drm_display_mode *mode = &vblank->hwmode;
978
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
979
	i915_reg_t high_frame, low_frame;
980
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
981
	unsigned long irqflags;
982

	/*
	 * On i965gm TV output the frame counter only works up to
	 * the point when we enable the TV encoder. After that the
	 * frame counter ceases to work and reads zero. We need a
	 * vblank wait before enabling the TV encoder and so we
	 * have to enable vblank interrupts while the frame counter
	 * is still in a working state. However the core vblank code
	 * does not like us returning non-zero frame counter values
	 * when we've told it that we don't have a working frame
	 * counter. Thus we must stop non-zero values leaking out.
	 */
	if (!vblank->max_vblank_count)
		return 0;

997 998 999 1000 1001
	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
1002

1003 1004 1005 1006 1007 1008
	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

1009 1010
	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);
1011

1012 1013
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

1014 1015 1016 1017 1018 1019
	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
1020 1021 1022
		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ_FW(low_frame);
		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
1023 1024
	} while (high1 != high2);

1025 1026
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

1027
	high1 >>= PIPE_FRAME_HIGH_SHIFT;
1028
	pixel = low & PIPE_PIXEL_MASK;
1029
	low >>= PIPE_FRAME_LOW_SHIFT;
1030 1031 1032 1033 1034 1035

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
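	/*
	 * Combine the high and low frame count fields read above
	 * (high1 << 8 | low), add one if the pixel counter shows we are
	 * already past vblank_start, and keep the result to 24 bits.
	 */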
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
1037 1038
}

1039
u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
1040
{
1041 1042
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
1043

1044
	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
1045 1046
}

/*
 * On certain encoders on certain platforms, pipe
 * scanline register will not work to get the scanline,
 * since the timings are driven from the PORT or issues
 * with scanline register updates.
 * This function will use Framestamp and current
 * timestamp registers to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);

		scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
					clock), 1000 * htotal);
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}

1098
/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
1099 1100 1101
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
1102
	struct drm_i915_private *dev_priv = to_i915(dev);
1103 1104
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
1105
	enum pipe pipe = crtc->pipe;
1106
	int position, vtotal;
1107

1108 1109 1110
	if (!crtc->active)
		return -1;

1111 1112 1113
	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

1114 1115 1116
	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

1117
	vtotal = mode->crtc_vtotal;
1118 1119 1120
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

1121
	if (IS_GEN(dev_priv, 2))
1122
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
1123
	else
1124
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
1125

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank.  So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem.  We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
1138
	if (HAS_DDI(dev_priv) && !position) {
1139 1140 1141 1142
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
1143
			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
1144 1145 1146 1147 1148 1149 1150
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

1151
	/*
1152 1153
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
1154
	 */
1155
	return (position + crtc->scanline_offset) % vtotal;
1156 1157
}

1158 1159 1160 1161
bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
			      bool in_vblank_irq, int *vpos, int *hpos,
			      ktime_t *stime, ktime_t *etime,
			      const struct drm_display_mode *mode)
1162
{
1163
	struct drm_i915_private *dev_priv = to_i915(dev);
1164 1165
	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
								pipe);
1166
	int position;
1167
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
1168
	unsigned long irqflags;
1169 1170 1171
	bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
		IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
		mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
1172

1173
	if (WARN_ON(!mode->crtc_clock)) {
1174
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
1175
				 "pipe %c\n", pipe_name(pipe));
1176
		return false;
1177 1178
	}

1179
	htotal = mode->crtc_htotal;
1180
	hsync_start = mode->crtc_hsync_start;
1181 1182 1183
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;
1184

1185 1186 1187 1188 1189 1190
	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

1191 1192 1193 1194 1195 1196
	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
1197

1198 1199 1200 1201 1202 1203
	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

1204
	if (use_scanline_counter) {
1205 1206 1207
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
1208
		position = __intel_get_crtc_scanline(intel_crtc);
1209 1210 1211 1212 1213
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
1214
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
1215

1216 1217 1218 1219
		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;
1220

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
1243 1244
	}

1245 1246 1247 1248 1249 1250 1251 1252
	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;
1263

1264
	if (use_scanline_counter) {
1265 1266 1267 1268 1269 1270
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}
1271

1272
	return true;
1273 1274
}

1275 1276
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
1277
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1278 1279 1280 1281 1282 1283 1284 1285 1286 1287
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

1288
static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
1289
{
1290
	struct intel_uncore *uncore = &dev_priv->uncore;
1291
	u32 busy_up, busy_down, max_avg, min_avg;
1292 1293
	u8 new_delay;

1294
	spin_lock(&mchdev_lock);
1295

1296 1297 1298
	intel_uncore_write16(uncore,
			     MEMINTRSTS,
			     intel_uncore_read(uncore, MEMINTRSTS));
1299

1300
	new_delay = dev_priv->ips.cur_delay;
1301

1302 1303 1304 1305 1306
	intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
	busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
	max_avg = intel_uncore_read(uncore, RCBMAXAVG);
	min_avg = intel_uncore_read(uncore, RCBMINAVG);
1307 1308

	/* Handle RCS change request from hw */
1309
	if (busy_up > max_avg) {
1310 1311 1312 1313
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
1314
	} else if (busy_down < min_avg) {
1315 1316 1317 1318
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
1319 1320
	}

1321
	if (ironlake_set_drps(dev_priv, new_delay))
1322
		dev_priv->ips.cur_delay = new_delay;
1323

1324
	spin_unlock(&mchdev_lock);
1325

1326 1327 1328
	return;
}

1329 1330
static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
1331
{
1332
	ei->ktime = ktime_get_raw();
1333 1334 1335
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}
1336

1337
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
1338
{
1339
	memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
1340
}
1341

1342 1343
static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
1344 1345
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	const struct intel_rps_ei *prev = &rps->ei;
1346 1347
	struct intel_rps_ei now;
	u32 events = 0;
1348

1349
	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
1350
		return 0;
1351

1352
	vlv_c0_read(dev_priv, &now);
1353

1354
	if (prev->ktime) {
1355
		u64 time, c0;
1356
		u32 render, media;
1357

1358
		time = ktime_us_delta(now.ktime, prev->ktime);
1359

1360 1361 1362 1363 1364 1365 1366
		time *= dev_priv->czclk_freq;

		/* Workload can be split between render + media,
		 * e.g. SwapBuffers being blitted in X after being rendered in
		 * mesa. To account for this we need to combine both engines
		 * into our activity counter.
		 */
1367 1368 1369
		render = now.render_c0 - prev->render_c0;
		media = now.media_c0 - prev->media_c0;
		c0 = max(render, media);
1370
		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
1371

		if (c0 > time * rps->power.up_threshold)
1373
			events = GEN6_PM_RP_UP_THRESHOLD;
		else if (c0 < time * rps->power.down_threshold)
1375
			events = GEN6_PM_RP_DOWN_THRESHOLD;
1376 1377
	}

1378
	rps->ei = now;
1379
	return events;
1380 1381
}

1382
static void gen6_pm_rps_work(struct work_struct *work)
1383
{
1384
	struct drm_i915_private *dev_priv =
1385 1386
		container_of(work, struct drm_i915_private, gt_pm.rps.work);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1387
	bool client_boost = false;
1388
	int new_delay, adj, min, max;
1389
	u32 pm_iir = 0;
1390

1391
	spin_lock_irq(&dev_priv->irq_lock);
1392 1393 1394
	if (rps->interrupts_enabled) {
		pm_iir = fetch_and_zero(&rps->pm_iir);
		client_boost = atomic_read(&rps->num_waiters);
	}
1396
	spin_unlock_irq(&dev_priv->irq_lock);
1397

1398
	/* Make sure we didn't queue anything we're not going to process. */
1399
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
1400
	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
1401
		goto out;
1402

1403
	mutex_lock(&rps->lock);
1404

1405 1406
	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

1407 1408 1409 1410
	adj = rps->last_adj;
	new_delay = rps->cur_freq;
	min = rps->min_freq_softlimit;
	max = rps->max_freq_softlimit;
1411
	if (client_boost)
1412 1413 1414
		max = rps->max_freq;
	if (client_boost && new_delay < rps->boost_freq) {
		new_delay = rps->boost_freq;
1415 1416
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1417 1418
		if (adj > 0)
			adj *= 2;
1419 1420
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
1421

1422
		if (new_delay >= rps->max_freq_softlimit)
1423
			adj = 0;
1424
	} else if (client_boost) {
1425
		adj = 0;
1426
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1427 1428 1429 1430
		if (rps->cur_freq > rps->efficient_freq)
			new_delay = rps->efficient_freq;
		else if (rps->cur_freq > rps->min_freq_softlimit)
			new_delay = rps->min_freq_softlimit;
1431 1432 1433 1434
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
1435 1436
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
1437

1438
		if (new_delay <= rps->min_freq_softlimit)
1439
			adj = 0;
1440
	} else { /* unknown event */
1441
		adj = 0;
1442
	}
1443

1444
	rps->last_adj = adj;
1445

	/*
	 * Limit deboosting and boosting to keep ourselves at the extremes
	 * when in the respective power modes (i.e. slowly decrease frequencies
	 * while in the HIGH_POWER zone and slowly increase frequencies while
	 * in the LOW_POWER zone). On idle, we will hit the timeout and drop
	 * to the next level quickly, and conversely if busy we expect to
	 * hit a waitboost and rapidly switch into max power.
	 */
	if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
	    (adj > 0 && rps->power.mode == LOW_POWER))
		rps->last_adj = 0;

1458 1459 1460
	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
1461
	new_delay += adj;
1462
	new_delay = clamp_t(int, new_delay, min, max);
1463

1464 1465
	if (intel_set_rps(dev_priv, new_delay)) {
		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
1466
		rps->last_adj = 0;
1467
	}
1468

1469
	mutex_unlock(&rps->lock);
1470 1471 1472 1473

out:
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	spin_lock_irq(&dev_priv->irq_lock);
1474
	if (rps->interrupts_enabled)
1475
		gen6_unmask_pm_irq(&dev_priv->gt, dev_priv->pm_rps_events);
1476
	spin_unlock_irq(&dev_priv->irq_lock);
1477 1478
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
1491
	struct drm_i915_private *dev_priv =
1492
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
1493
	u32 error_status, row, bank, subbank;
1494
	char *parity_event[6];
1495 1496
	u32 misccpctl;
	u8 slice = 0;
1497 1498 1499 1500 1501

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
1502
	mutex_lock(&dev_priv->drm.struct_mutex);
1503

1504 1505 1506 1507
	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

1508 1509 1510 1511
	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

1512
	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1513
		i915_reg_t reg;
1514

1515
		slice--;
1516
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
1517
			break;
1518

1519
		dev_priv->l3_parity.which_slice &= ~(1<<slice);
1520

1521
		reg = GEN7_L3CDERRST1(slice);
1522

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

1538
		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
1539
				   KOBJ_CHANGE, parity_event);
1540

1541 1542
		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);
1543

1544 1545 1546 1547 1548
		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}
1549

1550
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1551

1552 1553
out:
	WARN_ON(dev_priv->l3_parity.which_slice);
1554
	spin_lock_irq(&dev_priv->irq_lock);
1555
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1556
	spin_unlock_irq(&dev_priv->irq_lock);
1557

1558
	mutex_unlock(&dev_priv->drm.struct_mutex);
1559 1560
}

1561 1562
static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
					       u32 iir)
1563
{
1564
	if (!HAS_L3_DPF(dev_priv))
1565 1566
		return;

1567
	spin_lock(&dev_priv->irq_lock);
1568
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1569
	spin_unlock(&dev_priv->irq_lock);
1570

1571
	iir &= GT_PARITY_ERROR(dev_priv);
1572 1573 1574 1575 1576 1577
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

1578
	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1579 1580
}

1581
static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
1582 1583
			       u32 gt_iir)
{
1584
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
1585
		intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
1586
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
1587
		intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
1588 1589
}

1590
static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
1591 1592
			       u32 gt_iir)
{
1593
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
1594
		intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
1595
	if (gt_iir & GT_BSD_USER_INTERRUPT)
1596
		intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
1597
	if (gt_iir & GT_BLT_USER_INTERRUPT)
1598
		intel_engine_breadcrumbs_irq(dev_priv->engine[BCS0]);
1599

1600 1601
	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
1602 1603
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1604

1605 1606
	if (gt_iir & GT_PARITY_ERROR(dev_priv))
		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
1607 1608
}

1609
static void
1610
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
1611
{
1612
	bool tasklet = false;
1613

	if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
		tasklet = true;
1616

1617
	if (iir & GT_RENDER_USER_INTERRUPT) {
1618
		intel_engine_breadcrumbs_irq(engine);
1619
		tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
1620 1621 1622
	}

	if (tasklet)
		tasklet_hi_schedule(&engine->execlists.tasklet);
1624 1625
}
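
/*
 * Each GEN8_GT_IIR register packs two interrupt sources into its two
 * 16-bit halves; the *_IRQ_SHIFT values used below select the half
 * belonging to a given engine (the GuC status sits in the upper half
 * of IIR(2)) before it is passed on to gen8_cs_irq_handler().
 */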

static void gen8_gt_irq_ack(struct drm_i915_private *i915,
1627
			    u32 master_ctl, u32 gt_iir[4])
1628
{
1629
	void __iomem * const regs = i915->uncore.regs;
1630

1631 1632
#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
		      GEN8_GT_BCS_IRQ | \
1633
		      GEN8_GT_VCS0_IRQ | \
1634 1635 1636 1637 1638
		      GEN8_GT_VCS1_IRQ | \
		      GEN8_GT_VECS_IRQ | \
		      GEN8_GT_PM_IRQ | \
		      GEN8_GT_GUC_IRQ)

1639
	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1640 1641 1642
		gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
		if (likely(gt_iir[0]))
			raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
1643 1644
	}

1645
	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
1646 1647 1648
		gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
		if (likely(gt_iir[1]))
			raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
1649 1650
	}

1651 1652
	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
1653 1654
		if (likely(gt_iir[2]))
			raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]);
1655 1656
	}

1657 1658 1659 1660
	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
		if (likely(gt_iir[3]))
			raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
1661
	}
1662 1663
}

1664
static void gen8_gt_irq_handler(struct drm_i915_private *i915,
1665
				u32 master_ctl, u32 gt_iir[4])
1666
{
1667
	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1668
		gen8_cs_irq_handler(i915->engine[RCS0],
1669
				    gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
1670
		gen8_cs_irq_handler(i915->engine[BCS0],
1671
				    gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
1672 1673
	}

1674 1675 1676 1677
	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
		gen8_cs_irq_handler(i915->engine[VCS0],
				    gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT);
		gen8_cs_irq_handler(i915->engine[VCS1],
1678
				    gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
1679 1680
	}

1681
	if (master_ctl & GEN8_GT_VECS_IRQ) {
1682
		gen8_cs_irq_handler(i915->engine[VECS0],
1683
				    gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
1684
	}
1685

1686
	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
1687
		gen6_rps_irq_handler(i915, gt_iir[2]);
1688
		guc_irq_handler(&i915->gt.uc.guc, gt_iir[2] >> 16);
1689
	}
1690 1691
}

1692
static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1693
{
1694 1695
	switch (pin) {
	case HPD_PORT_C:
1696
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
1697
	case HPD_PORT_D:
1698
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
1699
	case HPD_PORT_E:
1700
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
1701
	case HPD_PORT_F:
1702 1703 1704 1705 1706 1707
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
	default:
		return false;
	}
}

1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727
static bool gen12_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_D:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
	case HPD_PORT_E:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
	case HPD_PORT_F:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
	case HPD_PORT_G:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
	case HPD_PORT_H:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC5);
	case HPD_PORT_I:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC6);
	default:
		return false;
	}
}

1728
static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1729
{
1730 1731
	switch (pin) {
	case HPD_PORT_A:
1732
		return val & PORTA_HOTPLUG_LONG_DETECT;
1733
	case HPD_PORT_B:
1734
		return val & PORTB_HOTPLUG_LONG_DETECT;
1735
	case HPD_PORT_C:
1736 1737 1738 1739 1740 1741
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

1742
static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1743
{
1744 1745
	switch (pin) {
	case HPD_PORT_A:
1746
		return val & ICP_DDIA_HPD_LONG_DETECT;
1747
	case HPD_PORT_B:
1748
		return val & ICP_DDIB_HPD_LONG_DETECT;
1749 1750
	case HPD_PORT_C:
		return val & TGP_DDIC_HPD_LONG_DETECT;
1751 1752 1753 1754 1755
	default:
		return false;
	}
}

1756
static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1757
{
1758 1759
	switch (pin) {
	case HPD_PORT_C:
1760
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
1761
	case HPD_PORT_D:
1762
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
1763
	case HPD_PORT_E:
1764
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
1765
	case HPD_PORT_F:
1766 1767 1768 1769 1770 1771
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
	default:
		return false;
	}
}

1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805
static bool tgp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & ICP_DDIA_HPD_LONG_DETECT;
	case HPD_PORT_B:
		return val & ICP_DDIB_HPD_LONG_DETECT;
	case HPD_PORT_C:
		return val & TGP_DDIC_HPD_LONG_DETECT;
	default:
		return false;
	}
}

static bool tgp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_D:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
	case HPD_PORT_E:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
	case HPD_PORT_F:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
	case HPD_PORT_G:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
	case HPD_PORT_H:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC5);
	case HPD_PORT_I:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC6);
	default:
		return false;
	}
}

1806
static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
1807
{
1808 1809
	switch (pin) {
	case HPD_PORT_E:
1810 1811 1812 1813 1814 1815
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

1816
static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1817
{
1818 1819
	switch (pin) {
	case HPD_PORT_A:
1820
		return val & PORTA_HOTPLUG_LONG_DETECT;
1821
	case HPD_PORT_B:
1822
		return val & PORTB_HOTPLUG_LONG_DETECT;
1823
	case HPD_PORT_C:
1824
		return val & PORTC_HOTPLUG_LONG_DETECT;
1825
	case HPD_PORT_D:
1826 1827 1828 1829 1830 1831
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

1832
static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1833
{
1834 1835
	switch (pin) {
	case HPD_PORT_A:
1836 1837 1838 1839 1840 1841
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

1842
static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1843
{
1844 1845
	switch (pin) {
	case HPD_PORT_B:
1846
		return val & PORTB_HOTPLUG_LONG_DETECT;
1847
	case HPD_PORT_C:
1848
		return val & PORTC_HOTPLUG_LONG_DETECT;
1849
	case HPD_PORT_D:
1850 1851 1852
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
1853 1854 1855
	}
}

1856
static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1857
{
1858 1859
	switch (pin) {
	case HPD_PORT_B:
1860
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1861
	case HPD_PORT_C:
1862
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1863
	case HPD_PORT_D:
1864 1865 1866
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
1867 1868 1869
	}
}

1870 1871 1872 1873 1874 1875 1876
/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
1877 1878 1879 1880
static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
			       u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
1881
			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
1882
{
1883
	enum hpd_pin pin;
1884

1885 1886
	BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);

1887 1888
	for_each_hpd_pin(pin) {
		if ((hpd[pin] & hotplug_trigger) == 0)
1889
			continue;
1890

1891
		*pin_mask |= BIT(pin);
1892

1893
		if (long_pulse_detect(pin, dig_hotplug_reg))
1894
			*long_mask |= BIT(pin);
1895 1896
	}

1897 1898
	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
1899 1900 1901

}

1902
static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
1903
{
1904
	wake_up_all(&dev_priv->gmbus_wait_queue);
1905 1906
}

1907
static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
1908
{
1909
	wake_up_all(&dev_priv->gmbus_wait_queue);
1910 1911
}

1912
#if defined(CONFIG_DEBUG_FS)
1913 1914
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
1915 1916 1917
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
1918 1919
{
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
T
Tomeu Vizoso 已提交
1920
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
1921 1922 1923
	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

	trace_intel_pipe_crc(crtc, crcs);
1924

1925
	spin_lock(&pipe_crc->lock);
1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
T
Tomeu Vizoso 已提交
1937
		spin_unlock(&pipe_crc->lock);
1938
		return;
T
Tomeu Vizoso 已提交
1939
	}
1940 1941 1942 1943 1944
	spin_unlock(&pipe_crc->lock);

	drm_crtc_add_crc_entry(&crtc->base, true,
				drm_crtc_accurate_vblank_count(&crtc->base),
				crcs);
1945
}
1946 1947
#else
static inline void
1948 1949
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
1950 1951 1952
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
1953 1954
#endif

1955

1956 1957
static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
D
Daniel Vetter 已提交
1958
{
1959
	display_pipe_crc_irq_handler(dev_priv, pipe,
1960 1961
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
D
Daniel Vetter 已提交
1962 1963
}

1964 1965
static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
1966
{
1967
	display_pipe_crc_irq_handler(dev_priv, pipe,
1968 1969 1970 1971 1972
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1973
}
1974

1975 1976
static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
1977
{
1978
	u32 res1, res2;
1979

1980
	if (INTEL_GEN(dev_priv) >= 3)
1981 1982 1983 1984
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

1985
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1986 1987 1988
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;
1989

1990
	display_pipe_crc_irq_handler(dev_priv, pipe,
1991 1992 1993 1994
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
1995
}
1996

1997 1998 1999
/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
2000
static void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir)
2001
{
2002
	struct drm_i915_private *i915 = gt->i915;
2003 2004 2005 2006 2007 2008 2009 2010
	struct intel_rps *rps = &i915->gt_pm.rps;
	const u32 events = i915->pm_rps_events & pm_iir;

	lockdep_assert_held(&i915->irq_lock);

	if (unlikely(!events))
		return;

2011
	gen6_mask_pm_irq(gt, events);
2012 2013 2014 2015 2016 2017 2018 2019

	if (!rps->interrupts_enabled)
		return;

	rps->pm_iir |= events;
	schedule_work(&rps->work);
}

2020
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
2021
{
2022 2023
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

2024
	if (pm_iir & dev_priv->pm_rps_events) {
2025
		spin_lock(&dev_priv->irq_lock);
2026 2027
		gen6_mask_pm_irq(&dev_priv->gt,
				 pm_iir & dev_priv->pm_rps_events);
2028 2029 2030
		if (rps->interrupts_enabled) {
			rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
			schedule_work(&rps->work);
I
Imre Deak 已提交
2031
		}
2032
		spin_unlock(&dev_priv->irq_lock);
2033 2034
	}

2035
	if (INTEL_GEN(dev_priv) >= 8)
2036 2037
		return;

2038
	if (pm_iir & PM_VEBOX_USER_INTERRUPT)
2039
		intel_engine_breadcrumbs_irq(dev_priv->engine[VECS0]);
B
Ben Widawsky 已提交
2040

2041 2042
	if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
		DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
2043 2044
}

2045
static void guc_irq_handler(struct intel_guc *guc, u16 iir)
2046
{
2047 2048
	if (iir & GUC_INTR_GUC2HOST)
		intel_guc_to_host_event_handler(guc);
2049 2050
}

2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063
static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(PIPESTAT(pipe),
			   PIPESTAT_INT_STATUS_MASK |
			   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}

2064 2065
static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
2066 2067 2068
{
	int pipe;

2069
	spin_lock(&dev_priv->irq_lock);
2070 2071 2072 2073 2074 2075

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

2076
	for_each_pipe(dev_priv, pipe) {
2077
		i915_reg_t reg;
2078
		u32 status_mask, enable_mask, iir_bit = 0;
2079

2080 2081 2082 2083 2084 2085 2086
		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */
2087 2088

		/* fifo underruns are filterered in the underrun handler. */
2089
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;
2090 2091 2092 2093 2094 2095 2096 2097

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
2098 2099 2100
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
2101 2102
		}
		if (iir & iir_bit)
2103
			status_mask |= dev_priv->pipestat_irq_mask[pipe];
2104

2105
		if (!status_mask)
2106 2107 2108
			continue;

		reg = PIPESTAT(pipe);
2109 2110
		pipe_stats[pipe] = I915_READ(reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
2111 2112 2113

		/*
		 * Clear the PIPE*STAT regs before the IIR
2114 2115 2116 2117 2118 2119
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
2120
		 */
2121 2122 2123 2124
		if (pipe_stats[pipe]) {
			I915_WRITE(reg, pipe_stats[pipe]);
			I915_WRITE(reg, enable_mask);
		}
2125
	}
2126
	spin_unlock(&dev_priv->irq_lock);
2127 2128
}

2129 2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196
static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}
}

static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);
}

static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

2197
static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
2198 2199 2200
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;
2201

2202
	for_each_pipe(dev_priv, pipe) {
2203 2204
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);
2205 2206

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2207
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2208

2209 2210
		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2211 2212 2213
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
2214
		gmbus_irq_handler(dev_priv);
2215 2216
}

2217
static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
2218
{
2219 2220 2221 2222 2223 2224 2225 2226 2227
	u32 hotplug_status = 0, hotplug_status_mask;
	int i;

	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
	else
		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
2228

2229 2230 2231 2232 2233 2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244
	/*
	 * We absolutely have to clear all the pending interrupt
	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
	 * interrupt bit won't have an edge, and the i965/g4x
	 * edge triggered IIR will not notice that an interrupt
	 * is still pending. We can't use PORT_HOTPLUG_EN to
	 * guarantee the edge as the act of toggling the enable
	 * bits can itself generate a new hotplug interrupt :(
	 */
	for (i = 0; i < 10; i++) {
		u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;

		if (tmp == 0)
			return hotplug_status;

		hotplug_status |= tmp;
2245
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2246 2247 2248 2249 2250
	}

	WARN_ONCE(1,
		  "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
		  I915_READ(PORT_HOTPLUG_STAT));
2251

2252 2253 2254
	return hotplug_status;
}

2255
static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
2256 2257 2258
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;
2259

2260 2261
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
2262
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
2263

2264
		if (hotplug_trigger) {
2265 2266 2267
			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
					   hotplug_trigger, hotplug_trigger,
					   hpd_status_g4x,
2268 2269
					   i9xx_port_hotplug_long_detect);

2270
			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2271
		}
2272 2273

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
2274
			dp_aux_irq_handler(dev_priv);
2275 2276
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
2277

2278
		if (hotplug_trigger) {
2279 2280 2281
			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
					   hotplug_trigger, hotplug_trigger,
					   hpd_status_i915,
2282
					   i9xx_port_hotplug_long_detect);
2283
			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2284
		}
2285
	}
2286 2287
}

2288
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
J
Jesse Barnes 已提交
2289
{
2290
	struct drm_i915_private *dev_priv = arg;
J
Jesse Barnes 已提交
2291 2292
	irqreturn_t ret = IRQ_NONE;

2293 2294 2295
	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

2296
	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2297
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2298

2299
	do {
2300
		u32 iir, gt_iir, pm_iir;
2301
		u32 pipe_stats[I915_MAX_PIPES] = {};
2302
		u32 hotplug_status = 0;
2303
		u32 ier = 0;
2304

J
Jesse Barnes 已提交
2305 2306
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);
2307
		iir = I915_READ(VLV_IIR);
J
Jesse Barnes 已提交
2308 2309

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
2310
			break;
J
Jesse Barnes 已提交
2311 2312 2313

		ret = IRQ_HANDLED;

2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326
		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
2327
		I915_WRITE(VLV_MASTER_IER, 0);
2328 2329
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);
2330 2331 2332 2333 2334 2335

		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

2336
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
2337
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
2338

2339 2340
		/* Call regardless, as some status bits might not be
		 * signalled in iir */
2341
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
2342

2343 2344 2345 2346
		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

2347 2348 2349 2350 2351 2352
		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);
2353

2354
		I915_WRITE(VLV_IER, ier);
2355
		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
2356

2357
		if (gt_iir)
2358
			snb_gt_irq_handler(dev_priv, gt_iir);
2359 2360 2361
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);

2362
		if (hotplug_status)
2363
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
2364

2365
		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
2366
	} while (0);
J
Jesse Barnes 已提交
2367

2368
	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2369

J
Jesse Barnes 已提交
2370 2371 2372
	return ret;
}

2373 2374
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
2375
	struct drm_i915_private *dev_priv = arg;
2376 2377
	irqreturn_t ret = IRQ_NONE;

2378 2379 2380
	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

2381
	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2382
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2383

2384
	do {
2385
		u32 master_ctl, iir;
2386
		u32 pipe_stats[I915_MAX_PIPES] = {};
2387
		u32 hotplug_status = 0;
2388
		u32 gt_iir[4];
2389 2390
		u32 ier = 0;

2391 2392
		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);
2393

2394 2395
		if (master_ctl == 0 && iir == 0)
			break;
2396

2397 2398
		ret = IRQ_HANDLED;

2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411
		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
2412
		I915_WRITE(GEN8_MASTER_IRQ, 0);
2413 2414
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);
2415

2416
		gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2417

2418
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
2419
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
2420

2421 2422
		/* Call regardless, as some status bits might not be
		 * signalled in iir */
2423
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
2424

2425 2426 2427 2428 2429
		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

2430 2431 2432 2433 2434 2435 2436
		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

2437
		I915_WRITE(VLV_IER, ier);
2438
		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2439

2440
		gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
2441

2442
		if (hotplug_status)
2443
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
2444

2445
		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
2446
	} while (0);
2447

2448
	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2449

2450 2451 2452
	return ret;
}

2453 2454
static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
2455 2456 2457 2458
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

2459 2460 2461 2462 2463 2464
	/*
	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
	 * unless we touch the hotplug register, even if hotplug_trigger is
	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
	 * errors.
	 */
2465
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2466 2467 2468 2469 2470 2471 2472 2473
	if (!hotplug_trigger) {
		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
			PORTD_HOTPLUG_STATUS_MASK |
			PORTC_HOTPLUG_STATUS_MASK |
			PORTB_HOTPLUG_STATUS_MASK;
		dig_hotplug_reg &= ~mask;
	}

2474
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2475 2476
	if (!hotplug_trigger)
		return;
2477

2478
	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2479 2480 2481
			   dig_hotplug_reg, hpd,
			   pch_port_hotplug_long_detect);

2482
	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2483 2484
}

2485
static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2486
{
2487
	int pipe;
2488
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
2489

2490
	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
2491

2492 2493 2494
	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
2495
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
2496 2497
				 port_name(port));
	}
2498

2499
	if (pch_iir & SDE_AUX_MASK)
2500
		dp_aux_irq_handler(dev_priv);
2501

2502
	if (pch_iir & SDE_GMBUS)
2503
		gmbus_irq_handler(dev_priv);
2504 2505 2506 2507 2508 2509 2510 2511 2512 2513

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

2514
	if (pch_iir & SDE_FDI_MASK)
2515
		for_each_pipe(dev_priv, pipe)
2516 2517 2518
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));
2519 2520 2521 2522 2523 2524 2525 2526

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
2527
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
2528 2529

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2530
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
2531 2532
}

2533
static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
2534 2535
{
	u32 err_int = I915_READ(GEN7_ERR_INT);
D
Daniel Vetter 已提交
2536
	enum pipe pipe;
2537

2538 2539 2540
	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

2541
	for_each_pipe(dev_priv, pipe) {
2542 2543
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2544

D
Daniel Vetter 已提交
2545
		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
2546 2547
			if (IS_IVYBRIDGE(dev_priv))
				ivb_pipe_crc_irq_handler(dev_priv, pipe);
D
Daniel Vetter 已提交
2548
			else
2549
				hsw_pipe_crc_irq_handler(dev_priv, pipe);
D
Daniel Vetter 已提交
2550 2551
		}
	}
2552

2553 2554 2555
	I915_WRITE(GEN7_ERR_INT, err_int);
}

2556
static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
2557 2558
{
	u32 serr_int = I915_READ(SERR_INT);
2559
	enum pipe pipe;
2560

2561 2562 2563
	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

2564 2565 2566
	for_each_pipe(dev_priv, pipe)
		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
2567 2568

	I915_WRITE(SERR_INT, serr_int);
2569 2570
}

2571
static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2572 2573
{
	int pipe;
2574
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2575

2576
	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
2577

2578 2579 2580 2581 2582 2583
	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}
2584 2585

	if (pch_iir & SDE_AUX_MASK_CPT)
2586
		dp_aux_irq_handler(dev_priv);
2587 2588

	if (pch_iir & SDE_GMBUS_CPT)
2589
		gmbus_irq_handler(dev_priv);
2590 2591 2592 2593 2594 2595 2596 2597

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
2598
		for_each_pipe(dev_priv, pipe)
2599 2600 2601
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));
2602 2603

	if (pch_iir & SDE_ERROR_CPT)
2604
		cpt_serr_int_handler(dev_priv);
2605 2606
}

2607 2608
static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir,
			    const u32 *pins)
2609
{
2610 2611
	u32 ddi_hotplug_trigger;
	u32 tc_hotplug_trigger;
2612 2613
	u32 pin_mask = 0, long_mask = 0;

2614 2615 2616 2617 2618 2619 2620 2621
	if (HAS_PCH_MCC(dev_priv)) {
		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
		tc_hotplug_trigger = 0;
	} else {
		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
		tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
	}

2622 2623 2624 2625 2626 2627 2628 2629
	if (ddi_hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
		I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   ddi_hotplug_trigger,
2630
				   dig_hotplug_reg, pins,
2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641
				   icp_ddi_port_hotplug_long_detect);
	}

	if (tc_hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
		I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   tc_hotplug_trigger,
2642
				   dig_hotplug_reg, pins,
2643 2644 2645 2646 2647 2648 2649 2650 2651 2652
				   icp_tc_port_hotplug_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_ICP)
		gmbus_irq_handler(dev_priv);
}

2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689
static void tgp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
	u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP;
	u32 pin_mask = 0, long_mask = 0;

	if (ddi_hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
		I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   ddi_hotplug_trigger,
				   dig_hotplug_reg, hpd_tgp,
				   tgp_ddi_port_hotplug_long_detect);
	}

	if (tc_hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
		I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   tc_hotplug_trigger,
				   dig_hotplug_reg, hpd_tgp,
				   tgp_tc_port_hotplug_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_ICP)
		gmbus_irq_handler(dev_priv);
}

2690
static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702
{
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
	u32 pin_mask = 0, long_mask = 0;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

2703 2704
		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug_trigger, dig_hotplug_reg, hpd_spt,
2705
				   spt_port_hotplug_long_detect);
2706 2707 2708 2709 2710 2711 2712 2713
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);

2714 2715
		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug2_trigger, dig_hotplug_reg, hpd_spt,
2716 2717 2718 2719
				   spt_port_hotplug2_long_detect);
	}

	if (pin_mask)
2720
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2721 2722

	if (pch_iir & SDE_GMBUS_CPT)
2723
		gmbus_irq_handler(dev_priv);
2724 2725
}

2726 2727
static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
2728 2729 2730 2731 2732 2733 2734
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);

2735
	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2736 2737 2738
			   dig_hotplug_reg, hpd,
			   ilk_port_hotplug_long_detect);

2739
	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2740 2741
}

2742 2743
static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
2744
{
2745
	enum pipe pipe;
2746 2747
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

2748
	if (hotplug_trigger)
2749
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
2750 2751

	if (de_iir & DE_AUX_CHANNEL_A)
2752
		dp_aux_irq_handler(dev_priv);
2753 2754

	if (de_iir & DE_GSE)
2755
		intel_opregion_asle_intr(dev_priv);
2756 2757 2758 2759

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

2760
	for_each_pipe(dev_priv, pipe) {
2761 2762
		if (de_iir & DE_PIPE_VBLANK(pipe))
			drm_handle_vblank(&dev_priv->drm, pipe);
2763

2764
		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2765
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2766

2767
		if (de_iir & DE_PIPE_CRC_DONE(pipe))
2768
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2769 2770 2771 2772 2773 2774
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

2775 2776
		if (HAS_PCH_CPT(dev_priv))
			cpt_irq_handler(dev_priv, pch_iir);
2777
		else
2778
			ibx_irq_handler(dev_priv, pch_iir);
2779 2780 2781 2782 2783

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

2784
	if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
2785
		ironlake_rps_change_irq_handler(dev_priv);
2786 2787
}

2788 2789
static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
2790
{
2791
	enum pipe pipe;
2792 2793
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

2794
	if (hotplug_trigger)
2795
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
2796 2797

	if (de_iir & DE_ERR_INT_IVB)
2798
		ivb_err_int_handler(dev_priv);
2799

2800 2801 2802 2803 2804 2805
	if (de_iir & DE_EDP_PSR_INT_HSW) {
		u32 psr_iir = I915_READ(EDP_PSR_IIR);

		intel_psr_irq_handler(dev_priv, psr_iir);
		I915_WRITE(EDP_PSR_IIR, psr_iir);
	}
2806

2807
	if (de_iir & DE_AUX_CHANNEL_A_IVB)
2808
		dp_aux_irq_handler(dev_priv);
2809 2810

	if (de_iir & DE_GSE_IVB)
2811
		intel_opregion_asle_intr(dev_priv);
2812

2813
	for_each_pipe(dev_priv, pipe) {
2814 2815
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
			drm_handle_vblank(&dev_priv->drm, pipe);
2816 2817 2818
	}

	/* check event from PCH */
2819
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2820 2821
		u32 pch_iir = I915_READ(SDEIIR);

2822
		cpt_irq_handler(dev_priv, pch_iir);
2823 2824 2825 2826 2827 2828

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

2829 2830 2831 2832 2833 2834 2835 2836
/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
2837
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2838
{
2839
	struct drm_i915_private *dev_priv = arg;
2840
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2841
	irqreturn_t ret = IRQ_NONE;
2842

2843 2844 2845
	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

2846
	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2847
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2848

2849 2850 2851 2852
	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

2853 2854 2855 2856 2857
	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
2858
	if (!HAS_PCH_NOP(dev_priv)) {
2859 2860 2861
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
	}
2862

2863 2864
	/* Find, clear, then process each source of interrupt */

2865
	gt_iir = I915_READ(GTIIR);
2866
	if (gt_iir) {
2867 2868
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
2869
		if (INTEL_GEN(dev_priv) >= 6)
2870
			snb_gt_irq_handler(dev_priv, gt_iir);
2871
		else
2872
			ilk_gt_irq_handler(dev_priv, gt_iir);
2873 2874
	}

2875 2876
	de_iir = I915_READ(DEIIR);
	if (de_iir) {
2877 2878
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
2879 2880
		if (INTEL_GEN(dev_priv) >= 7)
			ivb_display_irq_handler(dev_priv, de_iir);
2881
		else
2882
			ilk_display_irq_handler(dev_priv, de_iir);
2883 2884
	}

2885
	if (INTEL_GEN(dev_priv) >= 6) {
2886 2887 2888 2889
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
2890
			gen6_rps_irq_handler(dev_priv, pm_iir);
2891
		}
2892
	}
2893 2894

	I915_WRITE(DEIER, de_ier);
2895
	if (!HAS_PCH_NOP(dev_priv))
2896
		I915_WRITE(SDEIER, sde_ier);
2897

2898
	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2899
	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2900

2901 2902 2903
	return ret;
}

2904 2905
static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
2906
				const u32 hpd[HPD_NUM_PINS])
2907
{
2908
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2909

2910 2911
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2912

2913
	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2914
			   dig_hotplug_reg, hpd,
2915
			   bxt_port_hotplug_long_detect);
2916

2917
	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2918 2919
}

2920 2921 2922
static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
	u32 pin_mask = 0, long_mask = 0;
2923 2924
	u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
	u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
2925 2926 2927 2928 2929 2930 2931 2932 2933 2934
	long_pulse_detect_func long_pulse_detect;
	const u32 *hpd;

	if (INTEL_GEN(dev_priv) >= 12) {
		long_pulse_detect = gen12_port_hotplug_long_detect;
		hpd = hpd_gen12;
	} else {
		long_pulse_detect = gen11_port_hotplug_long_detect;
		hpd = hpd_gen11;
	}
2935 2936

	if (trigger_tc) {
2937 2938
		u32 dig_hotplug_reg;

2939 2940 2941 2942
		dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
		I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
2943
				   dig_hotplug_reg, hpd, long_pulse_detect);
2944 2945 2946 2947 2948 2949 2950 2951 2952
	}

	if (trigger_tbt) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
		I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
2953
				   dig_hotplug_reg, hpd, long_pulse_detect);
2954 2955 2956
	}

	if (pin_mask)
2957
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2958
	else
2959 2960 2961
		DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
}

2962 2963
static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
{
2964
	u32 mask;
2965

2966 2967 2968 2969 2970 2971 2972
	if (INTEL_GEN(dev_priv) >= 12)
		/* TODO: Add AUX entries for USBC */
		return TGL_DE_PORT_AUX_DDIA |
			TGL_DE_PORT_AUX_DDIB |
			TGL_DE_PORT_AUX_DDIC;

	mask = GEN8_AUX_CHANNEL_A;
2973 2974 2975 2976 2977
	if (INTEL_GEN(dev_priv) >= 9)
		mask |= GEN9_AUX_CHANNEL_B |
			GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;

2978
	if (IS_CNL_WITH_PORT_F(dev_priv) || IS_GEN(dev_priv, 11))
2979 2980
		mask |= CNL_AUX_CHANNEL_F;

2981 2982
	if (IS_GEN(dev_priv, 11))
		mask |= ICL_AUX_CHANNEL_E;
2983 2984 2985 2986

	return mask;
}

2987 2988 2989 2990 2991 2992 2993 2994
static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
	else
		return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
}

2995 2996
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2997 2998
{
	irqreturn_t ret = IRQ_NONE;
2999
	u32 iir;
3000
	enum pipe pipe;
J
Jesse Barnes 已提交
3001

3002
	if (master_ctl & GEN8_DE_MISC_IRQ) {
3003 3004
		iir = I915_READ(GEN8_DE_MISC_IIR);
		if (iir) {
3005 3006
			bool found = false;

3007
			I915_WRITE(GEN8_DE_MISC_IIR, iir);
3008
			ret = IRQ_HANDLED;
3009 3010

			if (iir & GEN8_DE_MISC_GSE) {
3011
				intel_opregion_asle_intr(dev_priv);
3012 3013 3014 3015
				found = true;
			}

			if (iir & GEN8_DE_EDP_PSR) {
3016 3017 3018 3019
				u32 psr_iir = I915_READ(EDP_PSR_IIR);

				intel_psr_irq_handler(dev_priv, psr_iir);
				I915_WRITE(EDP_PSR_IIR, psr_iir);
3020 3021 3022 3023
				found = true;
			}

			if (!found)
3024
				DRM_ERROR("Unexpected DE Misc interrupt\n");
3025
		}
3026 3027
		else
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
3028 3029
	}

3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040
	if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
		iir = I915_READ(GEN11_DE_HPD_IIR);
		if (iir) {
			I915_WRITE(GEN11_DE_HPD_IIR, iir);
			ret = IRQ_HANDLED;
			gen11_hpd_irq_handler(dev_priv, iir);
		} else {
			DRM_ERROR("The master control interrupt lied, (DE HPD)!\n");
		}
	}

3041
	if (master_ctl & GEN8_DE_PORT_IRQ) {
3042 3043 3044
		iir = I915_READ(GEN8_DE_PORT_IIR);
		if (iir) {
			u32 tmp_mask;
3045
			bool found = false;
3046

3047
			I915_WRITE(GEN8_DE_PORT_IIR, iir);
3048
			ret = IRQ_HANDLED;
J
Jesse Barnes 已提交
3049

3050
			if (iir & gen8_de_port_aux_mask(dev_priv)) {
3051
				dp_aux_irq_handler(dev_priv);
3052 3053 3054
				found = true;
			}

3055
			if (IS_GEN9_LP(dev_priv)) {
3056 3057
				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
				if (tmp_mask) {
3058 3059
					bxt_hpd_irq_handler(dev_priv, tmp_mask,
							    hpd_bxt);
3060 3061 3062 3063 3064
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
				if (tmp_mask) {
3065 3066
					ilk_hpd_irq_handler(dev_priv,
							    tmp_mask, hpd_bdw);
3067 3068
					found = true;
				}
3069 3070
			}

3071
			if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
3072
				gmbus_irq_handler(dev_priv);
S
Shashank Sharma 已提交
3073 3074 3075
				found = true;
			}

3076
			if (!found)
3077
				DRM_ERROR("Unexpected DE Port interrupt\n");
3078
		}
3079 3080
		else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
3081 3082
	}

3083
	for_each_pipe(dev_priv, pipe) {
3084
		u32 fault_errors;
3085

3086 3087
		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;
3088

3089 3090 3091 3092 3093
		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
			continue;
		}
3094

3095 3096
		ret = IRQ_HANDLED;
		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
3097

3098 3099
		if (iir & GEN8_PIPE_VBLANK)
			drm_handle_vblank(&dev_priv->drm, pipe);
3100

3101
		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
3102
			hsw_pipe_crc_irq_handler(dev_priv, pipe);
3103

3104 3105
		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
3106

3107
		fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
3108
		if (fault_errors)
3109
			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
3110 3111
				  pipe_name(pipe),
				  fault_errors);
3112 3113
	}

3114
	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
3115
	    master_ctl & GEN8_DE_PCH_IRQ) {
3116 3117 3118 3119 3120
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
3121 3122 3123
		iir = I915_READ(SDEIIR);
		if (iir) {
			I915_WRITE(SDEIIR, iir);
3124
			ret = IRQ_HANDLED;
3125

3126 3127 3128
			if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
				tgp_irq_handler(dev_priv, iir);
			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_MCC)
3129 3130 3131
				icp_irq_handler(dev_priv, iir, hpd_mcc);
			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
				icp_irq_handler(dev_priv, iir, hpd_icp);
3132
			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
3133
				spt_irq_handler(dev_priv, iir);
3134
			else
3135
				cpt_irq_handler(dev_priv, iir);
3136 3137 3138 3139 3140 3141 3142
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
		}
3143 3144
	}

3145 3146 3147
	return ret;
}

3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165
static inline u32 gen8_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN8_MASTER_IRQ);
}

static inline void gen8_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
}

3166 3167
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
3168
	struct drm_i915_private *dev_priv = arg;
3169
	void __iomem * const regs = dev_priv->uncore.regs;
3170
	u32 master_ctl;
3171
	u32 gt_iir[4];
3172 3173 3174 3175

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

3176 3177 3178
	master_ctl = gen8_master_intr_disable(regs);
	if (!master_ctl) {
		gen8_master_intr_enable(regs);
3179
		return IRQ_NONE;
3180
	}
3181 3182

	/* Find, clear, then process each source of interrupt */
3183
	gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
3184 3185 3186

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & ~GEN8_GT_IRQS) {
3187
		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3188
		gen8_de_irq_handler(dev_priv, master_ctl);
3189
		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3190
	}
3191

3192
	gen8_master_intr_enable(regs);
3193

3194
	gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
3195

3196
	return IRQ_HANDLED;
3197 3198
}

M
Mika Kuoppala 已提交
3199
static u32
3200
gen11_gt_engine_identity(struct intel_gt *gt,
3201
			 const unsigned int bank, const unsigned int bit)
M
Mika Kuoppala 已提交
3202
{
3203
	void __iomem * const regs = gt->uncore->regs;
M
Mika Kuoppala 已提交
3204 3205 3206
	u32 timeout_ts;
	u32 ident;

3207
	lockdep_assert_held(&gt->i915->irq_lock);
3208

M
Mika Kuoppala 已提交
3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229
	raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));

	/*
	 * NB: Specs do not specify how long to spin wait,
	 * so we do ~100us as an educated guess.
	 */
	timeout_ts = (local_clock() >> 10) + 100;
	do {
		ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
	} while (!(ident & GEN11_INTR_DATA_VALID) &&
		 !time_after32(local_clock() >> 10, timeout_ts));

	if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
		DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
			  bank, bit, ident);
		return 0;
	}

	raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
		      GEN11_INTR_DATA_VALID);

3230 3231 3232 3233
	return ident;
}

static void
3234 3235
gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
			const u16 iir)
3236
{
3237
	if (instance == OTHER_GUC_INSTANCE)
3238
		return guc_irq_handler(&gt->uc.guc, iir);
3239

3240
	if (instance == OTHER_GTPM_INSTANCE)
3241
		return gen11_rps_irq_handler(gt, iir);
3242

3243 3244 3245 3246 3247
	WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
		  instance, iir);
}

static void
3248 3249
gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
			 const u8 instance, const u16 iir)
3250 3251 3252 3253
{
	struct intel_engine_cs *engine;

	if (instance <= MAX_ENGINE_INSTANCE)
3254
		engine = gt->engine_class[class][instance];
3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265
	else
		engine = NULL;

	if (likely(engine))
		return gen8_cs_irq_handler(engine, iir);

	WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
		  class, instance);
}

static void
3266
gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity)
3267 3268 3269 3270 3271 3272 3273 3274 3275
{
	const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
	const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
	const u16 intr = GEN11_INTR_ENGINE_INTR(identity);

	if (unlikely(!intr))
		return;

	if (class <= COPY_ENGINE_CLASS)
3276
		return gen11_engine_irq_handler(gt, class, instance, intr);
3277 3278

	if (class == OTHER_CLASS)
3279
		return gen11_other_irq_handler(gt, instance, intr);
3280 3281 3282

	WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
		  class, instance, intr);
M
Mika Kuoppala 已提交
3283 3284 3285
}

static void
3286
gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
M
Mika Kuoppala 已提交
3287
{
3288
	void __iomem * const regs = gt->uncore->regs;
3289 3290
	unsigned long intr_dw;
	unsigned int bit;
M
Mika Kuoppala 已提交
3291

3292
	lockdep_assert_held(&gt->i915->irq_lock);
M
Mika Kuoppala 已提交
3293

3294
	intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
M
Mika Kuoppala 已提交
3295

3296
	for_each_set_bit(bit, &intr_dw, 32) {
3297
		const u32 ident = gen11_gt_engine_identity(gt, bank, bit);
M
Mika Kuoppala 已提交
3298

3299
		gen11_gt_identity_handler(gt, ident);
3300
	}
M
Mika Kuoppala 已提交
3301

3302 3303 3304
	/* Clear must be after shared has been served for engine */
	raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
}
M
Mika Kuoppala 已提交
3305

3306
static void
3307
gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
3308
{
3309
	struct drm_i915_private *i915 = gt->i915;
3310 3311 3312 3313 3314 3315
	unsigned int bank;

	spin_lock(&i915->irq_lock);

	for (bank = 0; bank < 2; bank++) {
		if (master_ctl & GEN11_GT_DW_IRQ(bank))
3316
			gen11_gt_bank_handler(gt, bank);
M
Mika Kuoppala 已提交
3317
	}
3318 3319

	spin_unlock(&i915->irq_lock);
M
Mika Kuoppala 已提交
3320 3321
}

3322
static u32
3323
gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
3324
{
3325
	void __iomem * const regs = gt->uncore->regs;
3326
	u32 iir;
3327 3328

	if (!(master_ctl & GEN11_GU_MISC_IRQ))
3329 3330 3331 3332 3333
		return 0;

	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
	if (likely(iir))
		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
3334

3335
	return iir;
3336 3337 3338
}

static void
3339
gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
3340 3341
{
	if (iir & GEN11_GU_MISC_GSE)
3342
		intel_opregion_asle_intr(gt->i915);
3343 3344
}

3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362
static inline u32 gen11_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
}

static inline void gen11_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}

M
Mika Kuoppala 已提交
3363 3364
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
3365
	struct drm_i915_private * const i915 = arg;
3366
	void __iomem * const regs = i915->uncore.regs;
3367
	struct intel_gt *gt = &i915->gt;
M
Mika Kuoppala 已提交
3368
	u32 master_ctl;
3369
	u32 gu_misc_iir;
M
Mika Kuoppala 已提交
3370 3371 3372 3373

	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

3374 3375 3376
	master_ctl = gen11_master_intr_disable(regs);
	if (!master_ctl) {
		gen11_master_intr_enable(regs);
M
Mika Kuoppala 已提交
3377
		return IRQ_NONE;
3378
	}
M
Mika Kuoppala 已提交
3379 3380

	/* Find, clear, then process each source of interrupt. */
3381
	gen11_gt_irq_handler(gt, master_ctl);
M
Mika Kuoppala 已提交
3382 3383 3384 3385 3386

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & GEN11_DISPLAY_IRQ) {
		const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);

3387
		disable_rpm_wakeref_asserts(&i915->runtime_pm);
M
Mika Kuoppala 已提交
3388 3389 3390 3391 3392
		/*
		 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
		 * for the display related bits.
		 */
		gen8_de_irq_handler(i915, disp_ctl);
3393
		enable_rpm_wakeref_asserts(&i915->runtime_pm);
M
Mika Kuoppala 已提交
3394 3395
	}

3396
	gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
3397

3398
	gen11_master_intr_enable(regs);
M
Mika Kuoppala 已提交
3399

3400
	gen11_gu_misc_irq_handler(gt, gu_misc_iir);
3401

M
Mika Kuoppala 已提交
3402 3403 3404
	return IRQ_HANDLED;
}

3405 3406 3407
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
3408
int i8xx_enable_vblank(struct drm_crtc *crtc)
3409
{
3410 3411
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3412
	unsigned long irqflags;
3413

3414
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3415
	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
3416
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3417

3418 3419 3420
	return 0;
}

3421
int i945gm_enable_vblank(struct drm_crtc *crtc)
3422
{
3423
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
3424 3425 3426 3427

	if (dev_priv->i945gm_vblank.enabled++ == 0)
		schedule_work(&dev_priv->i945gm_vblank.work);

3428
	return i8xx_enable_vblank(crtc);
3429 3430
}

3431
int i965_enable_vblank(struct drm_crtc *crtc)
3432
{
3433 3434
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3435 3436 3437
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3438 3439
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
3440 3441 3442 3443 3444
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

3445
int ilk_enable_vblank(struct drm_crtc *crtc)
J
Jesse Barnes 已提交
3446
{
3447 3448
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
J
Jesse Barnes 已提交
3449
	unsigned long irqflags;
3450
	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
3451
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
J
Jesse Barnes 已提交
3452 3453

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3454
	ilk_enable_display_irq(dev_priv, bit);
J
Jesse Barnes 已提交
3455 3456
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

3457 3458 3459 3460
	/* Even though there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated.
	 */
	if (HAS_PSR(dev_priv))
3461
		drm_crtc_vblank_restore(crtc);
3462

J
Jesse Barnes 已提交
3463 3464 3465
	return 0;
}

int bdw_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* Even if there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated, so check only for PSR.
	 */
	if (HAS_PSR(dev_priv))
		drm_crtc_vblank_restore(crtc);

	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
void i8xx_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

void i945gm_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);

	i8xx_disable_vblank(crtc);

	if (--dev_priv->i945gm_vblank.enabled == 0)
		schedule_work(&dev_priv->i945gm_vblank.work);
}

void i965_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

void ilk_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

void bdw_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void i945gm_vblank_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, i945gm_vblank.work);

	/*
	 * Vblank interrupts fail to wake up the device from C3,
	 * hence we want to prevent C3 usage while vblank interrupts
	 * are enabled.
	 */
	pm_qos_update_request(&dev_priv->i945gm_vblank.pm_qos,
			      READ_ONCE(dev_priv->i945gm_vblank.enabled) ?
			      dev_priv->i945gm_vblank.c3_disable_latency :
			      PM_QOS_DEFAULT_VALUE);
}

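/*
 * Find the cpuidle state with the given name and return its exit latency
 * minus one, so a PM QoS request of that value keeps the governor from
 * selecting that state. Returns 0 if the cpuidle driver or the named state
 * is not found.
 */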
static int cstate_disable_latency(const char *name)
{
	const struct cpuidle_driver *drv;
	int i;

	drv = cpuidle_get_driver();
	if (!drv)
		return 0;

	for (i = 0; i < drv->state_count; i++) {
		const struct cpuidle_state *state = &drv->states[i];

		if (!strcmp(state->name, name))
			return state->exit_latency ?
				state->exit_latency - 1 : 0;
	}

	return 0;
}

static void i945gm_vblank_work_init(struct drm_i915_private *dev_priv)
{
	INIT_WORK(&dev_priv->i945gm_vblank.work,
		  i945gm_vblank_work_func);

	dev_priv->i945gm_vblank.c3_disable_latency =
		cstate_disable_latency("C3");
	pm_qos_add_request(&dev_priv->i945gm_vblank.pm_qos,
			   PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);
}

static void i945gm_vblank_work_fini(struct drm_i915_private *dev_priv)
{
	cancel_work_sync(&dev_priv->i945gm_vblank.work);
	pm_qos_remove_request(&dev_priv->i945gm_vblank.pm_qos);
}

static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (HAS_PCH_NOP(dev_priv))
		return;

	GEN3_IRQ_RESET(uncore, SDE);

	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		I915_WRITE(SERR_INT, 0xffffffff);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * This function needs to be called before interrupts are enabled.
 */
static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_NOP(dev_priv))
		return;

	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}

static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	GEN3_IRQ_RESET(uncore, GT);
	if (INTEL_GEN(dev_priv) >= 6)
		GEN3_IRQ_RESET(uncore, GEN6_PM);
}

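/*
 * VLV/CHV display interrupts are managed separately from the GT: the reset
 * below clears DPINVGTT status, hotplug state and pipestats, then parks the
 * VLV_ IRQ registers with every source masked (irq_mask == ~0u) until the
 * display postinstall runs again.
 */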
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (IS_CHERRYVIEW(dev_priv))
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);

	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
	intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, VLV_);
	dev_priv->irq_mask = ~0u;
}

static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 pipestat_mask;
	u32 enable_mask;
	enum pipe pipe;

	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_LPE_PIPE_A_INTERRUPT |
		I915_LPE_PIPE_B_INTERRUPT;

	if (IS_CHERRYVIEW(dev_priv))
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
			I915_LPE_PIPE_C_INTERRUPT;

	WARN_ON(dev_priv->irq_mask != ~0u);

	dev_priv->irq_mask = ~enable_mask;

	GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
}

/* drm_dma.h hooks
*/
static void ironlake_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	GEN3_IRQ_RESET(uncore, DE);
	if (IS_GEN(dev_priv, 7))
		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);

	if (IS_HASWELL(dev_priv)) {
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}

	gen5_gt_irq_reset(dev_priv);

	ibx_irq_reset(dev_priv);
}

static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
	I915_WRITE(VLV_MASTER_IER, 0);
	POSTING_READ(VLV_MASTER_IER);

	gen5_gt_irq_reset(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	GEN8_IRQ_RESET_NDX(uncore, GT, 0);
	GEN8_IRQ_RESET_NDX(uncore, GT, 1);
	GEN8_IRQ_RESET_NDX(uncore, GT, 2);
	GEN8_IRQ_RESET_NDX(uncore, GT, 3);
}

static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	int pipe;

	gen8_master_intr_disable(dev_priv->uncore.regs);

	gen8_gt_irq_reset(dev_priv);

	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_reset(dev_priv);
}

static void gen11_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	/* Disable RCS, BCS, VCS and VECS class engines. */
	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE,	  0);

	/* Restore irq masks on RCS, BCS, VCS and VECS engines. */
	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK,	~0);
	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK,	~0);
	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK,	~0);
	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK,	~0);
	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK,	~0);

	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK,  ~0);
}

static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	int pipe;

	gen11_master_intr_disable(dev_priv->uncore.regs);

	gen11_gt_irq_reset(&dev_priv->gt);

	intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);

	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
	GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		GEN3_IRQ_RESET(uncore, SDE);
}

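/*
 * The two helpers below are called around display power well transitions:
 * the per-pipe interrupt registers for the pipes in @pipe_mask are
 * (re)programmed after the well is powered up and reset again before it is
 * powered down, taking dev_priv->irq_lock to serialize against the rest of
 * the interrupt setup.
 */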
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
				  dev_priv->de_irq_mask[pipe],
				  ~dev_priv->de_irq_mask[pipe] | extra_ier);

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);
}

static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

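/*
 * Build the mask of hotplug interrupt bits to enable: for every encoder
 * whose HPD pin is currently marked HPD_ENABLED, OR in the corresponding
 * bit from the platform's hpd[] table.
 */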
static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}

static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on LPT+.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTB_PULSE_DURATION_MASK |
		     PORTC_PULSE_DURATION_MASK |
		     PORTD_PULSE_DURATION_MASK);
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	/*
	 * When CPU and PCH are on the same package, port A
	 * HPD must be enabled in both north and south.
	 */
	if (HAS_PCH_LPT_LP(dev_priv))
		hotplug |= PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	if (HAS_PCH_IBX(dev_priv)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	ibx_hpd_detection_setup(dev_priv);
}

static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv,
				    u32 ddi_hotplug_enable_mask,
				    u32 tc_hotplug_enable_mask)
{
	u32 hotplug;

	hotplug = I915_READ(SHOTPLUG_CTL_DDI);
	hotplug |= ddi_hotplug_enable_mask;
	I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);

	if (tc_hotplug_enable_mask) {
		hotplug = I915_READ(SHOTPLUG_CTL_TC);
		hotplug |= tc_hotplug_enable_mask;
		I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
	}
}

static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
				ICP_TC_HPD_ENABLE_MASK);
}

static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	hotplug_irqs = SDE_DDI_MASK_TGP;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_mcc);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0);
}

static void tgp_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	hotplug_irqs = SDE_DDI_MASK_TGP | SDE_TC_MASK_TGP;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_tgp);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK,
				TGP_TC_HPD_ENABLE_MASK);
}

static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
	I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);

	hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
	I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
}

static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;
	const u32 *hpd;
	u32 val;

	hpd = INTEL_GEN(dev_priv) >= 12 ? hpd_gen12 : hpd_gen11;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd);
	hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;

	val = I915_READ(GEN11_DE_HPD_IMR);
	val &= ~hotplug_irqs;
	I915_WRITE(GEN11_DE_HPD_IMR, val);
	POSTING_READ(GEN11_DE_HPD_IMR);

	gen11_hpd_detection_setup(dev_priv);

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
		tgp_hpd_irq_setup(dev_priv);
	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_hpd_irq_setup(dev_priv);
}

static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 val, hotplug;

	/* Display WA #1179 WaHardHangonHotPlug: cnp */
	if (HAS_PCH_CNP(dev_priv)) {
		val = I915_READ(SOUTH_CHICKEN1);
		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
		val |= CHASSIS_CLK_REQ_DURATION(0xf);
		I915_WRITE(SOUTH_CHICKEN1, val);
	}

	/* Enable digital hotplug on the PCH */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE |
		   PORTD_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);

	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
	hotplug |= PORTE_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}

static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	spt_hpd_detection_setup(dev_priv);
}

static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 * The pulse duration bits are reserved on HSW+.
	 */
	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
		   DIGITAL_PORTA_PULSE_DURATION_2ms;
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
}

static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	if (INTEL_GEN(dev_priv) >= 8) {
		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);

		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else if (INTEL_GEN(dev_priv) >= 7) {
		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else {
		hotplug_irqs = DE_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	}

	ilk_hpd_detection_setup(dev_priv);

	ibx_hpd_irq_setup(dev_priv);
}

static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
				      u32 enabled_irqs)
{
	u32 hotplug;

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE;

	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
		      hotplug, enabled_irqs);
	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;

	/*
	 * For BXT invert bit has to be set based on AOB design
	 * for HPD detection logic, update it based on VBT fields.
	 */
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
		hotplug |= BXT_DDIA_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
		hotplug |= BXT_DDIB_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
		hotplug |= BXT_DDIC_HPD_INVERT;

	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	__bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
}

static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;

	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);

	__bxt_hpd_detection_setup(dev_priv, enabled_irqs);
}

static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	u32 mask;

	if (HAS_PCH_NOP(dev_priv))
		return;

	if (HAS_PCH_IBX(dev_priv))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
	else
		mask = SDE_GMBUS_CPT;

	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);

	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
	    HAS_PCH_LPT(dev_priv))
		ibx_hpd_detection_setup(dev_priv);
	else
		spt_hpd_detection_setup(dev_priv);
}

static void gen5_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev_priv)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
		gt_irqs |= GT_PARITY_ERROR(dev_priv);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN(dev_priv, 5)) {
		gt_irqs |= ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	GEN3_IRQ_INIT(uncore, GT, dev_priv->gt_irq_mask, gt_irqs);

	if (INTEL_GEN(dev_priv) >= 6) {
		/*
		 * RPS interrupts will get enabled/disabled on demand when RPS
		 * itself is enabled/disabled.
		 */
		if (HAS_ENGINE(dev_priv, VECS0)) {
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
			dev_priv->gt.pm_ier |= PM_VEBOX_USER_INTERRUPT;
		}

		dev_priv->gt.pm_imr = 0xffffffff;
		GEN3_IRQ_INIT(uncore, GEN6_PM, dev_priv->gt.pm_imr, pm_irqs);
	}
}

static void ironlake_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 display_mask, extra_mask;

	if (INTEL_GEN(dev_priv) >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_DP_A_HOTPLUG);
	}

	if (IS_HASWELL(dev_priv)) {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
		intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
		display_mask |= DE_EDP_PSR_INT_HSW;
	}

	dev_priv->irq_mask = ~display_mask;

	ibx_irq_pre_postinstall(dev_priv);

	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
		      display_mask | extra_mask);

	gen5_gt_irq_postinstall(dev_priv);

	ilk_hpd_detection_setup(dev_priv);

	ibx_irq_postinstall(dev_priv);

	if (IS_IRONLAKE_M(dev_priv)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irq(&dev_priv->irq_lock);
		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}
}

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv)) {
		vlv_display_irq_reset(dev_priv);
		vlv_display_irq_postinstall(dev_priv);
	}
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		vlv_display_irq_reset(dev_priv);
}

static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen5_gt_irq_postinstall(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(VLV_MASTER_IER);
}

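/*
 * gt_interrupts[] below is indexed by GT IIR bank (0-3). Bank 2 carries the
 * PM/RPS (and GuC) interrupts and is intentionally left at 0 here; those
 * sources are enabled on demand via pm_ier/pm_imr.
 */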
static void gen8_gt_irq_postinstall(struct drm_i915_private *i915)
{
	struct intel_gt *gt = &i915->gt;
	struct intel_uncore *uncore = gt->uncore;

	/* These are interrupts we'll toggle with the ring mask register */
	u32 gt_interrupts[] = {
		(GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
		 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),

		(GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
		 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),

		0,

		(GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
	};

	gt->pm_ier = 0x0;
	gt->pm_imr = ~gt->pm_ier;
	GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled. Same will be the case for GuC interrupts.
	 */
	GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier);
	GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	u32 de_pipe_enables;
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
	enum pipe pipe;

	if (INTEL_GEN(dev_priv) <= 10)
		de_misc_masked |= GEN8_DE_MISC_GSE;

	if (INTEL_GEN(dev_priv) >= 9) {
		de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
				  GEN9_AUX_CHANNEL_D;
		if (IS_GEN9_LP(dev_priv))
			de_port_masked |= BXT_DE_PORT_GMBUS;
	} else {
		de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	}

	if (INTEL_GEN(dev_priv) >= 11)
		de_port_masked |= ICL_AUX_CHANNEL_E;

	if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
		de_port_masked |= CNL_AUX_CHANNEL_F;

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

	de_port_enables = de_port_masked;
	if (IS_GEN9_LP(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;

	gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
	intel_psr_irq_control(dev_priv, dev_priv->psr.debug);

	for_each_pipe(dev_priv, pipe) {
		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;

		if (intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);
	}

	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);

	if (INTEL_GEN(dev_priv) >= 11) {
		u32 de_hpd_masked = 0;
		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
				     GEN11_DE_TBT_HOTPLUG_MASK;

		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
			      de_hpd_enables);
		gen11_hpd_detection_setup(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		bxt_hpd_detection_setup(dev_priv);
	} else if (IS_BROADWELL(dev_priv)) {
		ilk_hpd_detection_setup(dev_priv);
	}
}

static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_pre_postinstall(dev_priv);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev_priv);

	gen8_master_intr_enable(dev_priv->uncore.regs);
}

static void gen11_gt_irq_postinstall(struct intel_gt *gt)
{
	const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
	struct intel_uncore *uncore = gt->uncore;
	const u32 dmask = irqs << 16 | irqs;
	const u32 smask = irqs << 16;

	BUILD_BUG_ON(irqs & 0xffff0000);

	/* Enable RCS, BCS, VCS and VECS class interrupts. */
	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask);
	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask);

	/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask);

	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled.
	 */
	gt->pm_ier = 0x0;
	gt->pm_imr = ~gt->pm_ier;
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);

	/* Same thing for GuC interrupts */
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK,  ~0);
}

static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
	u32 mask = SDE_GMBUS_ICP;

	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);

	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);

	if (HAS_PCH_TGP(dev_priv))
		icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK,
					TGP_TC_HPD_ENABLE_MASK);
	else if (HAS_PCH_MCC(dev_priv))
		icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0);
	else
		icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
					ICP_TC_HPD_ENABLE_MASK);
}

static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);

	gen11_gt_irq_postinstall(&dev_priv->gt);
	gen8_de_irq_postinstall(dev_priv);

	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);

	gen11_master_intr_enable(uncore->regs);
	POSTING_READ(GEN11_GFX_MSTR_IRQ);
}

static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen8_gt_irq_postinstall(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);
}

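/*
 * What follows is the legacy interrupt path for the old GMCH platforms
 * (gen2/3/4): the i8xx, i915 and i965/g4x variants of reset, postinstall
 * and the top-level handler.
 */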
static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i9xx_pipestat_irq_reset(dev_priv);

	GEN2_IRQ_RESET(uncore);
}

static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u16 enable_mask;

	intel_uncore_write16(uncore,
			     EMR,
			     ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void i8xx_error_irq_ack(struct drm_i915_private *i915,
			       u16 *eir, u16 *eir_stuck)
{
	struct intel_uncore *uncore = &i915->uncore;
	u16 emr;

	*eir = intel_uncore_read16(uncore, EIR);

	if (*eir)
		intel_uncore_write16(uncore, EIR, *eir);

	*eir_stuck = intel_uncore_read16(uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_read16(uncore, EMR);
	intel_uncore_write16(uncore, EMR, 0xffff);
	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
}

static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u16 eir, u16 eir_stuck)
{
	DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);

	if (eir_stuck)
		DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);
}

static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	*eir = I915_READ(EIR);

	I915_WRITE(EIR, *eir);

	*eir_stuck = I915_READ(EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = I915_READ(EMR);
	I915_WRITE(EMR, 0xffffffff);
	I915_WRITE(EMR, emr | *eir_stuck);
}

static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u32 eir, u32 eir_stuck)
{
	DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);

	if (eir_stuck)
		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck);
}

static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u16 eir = 0, eir_stuck = 0;
		u16 iir;

		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);

		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
}

static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
			  I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		I915_WRITE(GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
}

static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;
	u32 error_mask;

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}

static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/* Programming the CRT detection parameters tends
	   to generate a spurious hotplug event about three
	   seconds later.  So just do it once.
	*/
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		I915_WRITE(GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);

		if (iir & I915_BSD_USER_INTERRUPT)
			intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not setup the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int i;

	if (IS_I945GM(dev_priv))
		i945gm_vblank_work_init(dev_priv);

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&rps->work, gen6_pm_rps_work);

	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	/* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
	if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11)
		dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD |
					   GEN6_PM_RP_DOWN_THRESHOLD |
					   GEN6_PM_RP_DOWN_TIMEOUT);

	/* We share the register with other engine */
	if (INTEL_GEN(dev_priv) > 9)
		GEM_WARN_ON(dev_priv->pm_rps_events & 0xffff0000);

	rps->pm_intrmsk_mbz = 0;

	/*
	 * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer
	 * if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_GEN(dev_priv) <= 7)
		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_GEN(dev_priv) >= 8)
		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;

	dev->vblank_disable_immediate = true;

	/* Most platforms treat the display irq block as an always-on
	 * power domain. vlv/chv can disable it at runtime and need
	 * special care to avoid writing any of the display block registers
	 * outside of the power domain. We defer setting up the display irqs
	 * in this case to the runtime pm.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	/* If we have MST support, we want to avoid doing short HPD IRQ storm
	 * detection, as short HPD storms will occur as a natural part of
	 * sideband messaging with MST.
	 * On older platforms however, IRQ storms can occur with both long and
	 * short pulses, as seen on some G4x systems.
	 */
	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);

	if (HAS_GMCH(dev_priv)) {
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else {
		if (HAS_PCH_MCC(dev_priv))
			/* EHL doesn't need most of gen11_hpd_irq_setup */
			dev_priv->display.hpd_irq_setup = mcc_hpd_irq_setup;
		else if (INTEL_GEN(dev_priv) >= 11)
			dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
		else if (IS_GEN9_LP(dev_priv))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	}
}

/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

	if (IS_I945GM(i915))
		i945gm_vblank_work_fini(i915);

	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}

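/*
 * Top-level dispatch: pick the per-platform irq handler, reset and
 * postinstall hooks used by intel_irq_install(), intel_irq_uninstall() and
 * the runtime-pm paths below.
 */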
static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			return cherryview_irq_handler;
		else if (IS_VALLEYVIEW(dev_priv))
			return valleyview_irq_handler;
		else if (IS_GEN(dev_priv, 4))
			return i965_irq_handler;
		else if (IS_GEN(dev_priv, 3))
			return i915_irq_handler;
		else
			return i8xx_irq_handler;
	} else {
		if (INTEL_GEN(dev_priv) >= 11)
			return gen11_irq_handler;
		else if (INTEL_GEN(dev_priv) >= 8)
			return gen8_irq_handler;
		else
			return ironlake_irq_handler;
	}
}

static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_reset(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_reset(dev_priv);
		else if (IS_GEN(dev_priv, 4))
			i965_irq_reset(dev_priv);
		else if (IS_GEN(dev_priv, 3))
			i915_irq_reset(dev_priv);
		else
			i8xx_irq_reset(dev_priv);
	} else {
		if (INTEL_GEN(dev_priv) >= 11)
			gen11_irq_reset(dev_priv);
		else if (INTEL_GEN(dev_priv) >= 8)
			gen8_irq_reset(dev_priv);
		else
			ironlake_irq_reset(dev_priv);
	}
}

static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_postinstall(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_postinstall(dev_priv);
		else if (IS_GEN(dev_priv, 4))
			i965_irq_postinstall(dev_priv);
		else if (IS_GEN(dev_priv, 3))
			i915_irq_postinstall(dev_priv);
		else
			i8xx_irq_postinstall(dev_priv);
	} else {
		if (INTEL_GEN(dev_priv) >= 11)
			gen11_irq_postinstall(dev_priv);
		else if (INTEL_GEN(dev_priv) >= 8)
			gen8_irq_postinstall(dev_priv);
		else
			ironlake_irq_postinstall(dev_priv);
	}
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	int irq = dev_priv->drm.pdev->irq;
	int ret;

	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	dev_priv->drm.irq_enabled = true;

	intel_irq_reset(dev_priv);

	ret = request_irq(irq, intel_irq_handler(dev_priv),
			  IRQF_SHARED, DRIVER_NAME, dev_priv);
	if (ret < 0) {
		dev_priv->drm.irq_enabled = false;
		return ret;
	}

	intel_irq_postinstall(dev_priv);

	return ret;
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	int irq = dev_priv->drm.pdev->irq;

	/*
	 * FIXME we can get called twice during driver load
	 * error handling due to intel_modeset_cleanup()
	 * calling us out of sequence. Would be nice if
	 * it didn't do that...
	 */
	if (!dev_priv->drm.irq_enabled)
		return;

	dev_priv->drm.irq_enabled = false;

	intel_irq_reset(dev_priv);

	free_irq(irq, dev_priv);

	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	intel_irq_reset(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
	intel_synchronize_irq(dev_priv);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->runtime_pm.irqs_enabled = true;
	intel_irq_reset(dev_priv);
	intel_irq_postinstall(dev_priv);
}