/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling
 * interrupt handling. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
	[HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
	[HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
	[HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
	[HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
	[HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP,
	[HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP,
	[HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP,
	[HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
};
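
/*
 * The tables above map each hpd_pin to the platform-specific hotplug bit(s)
 * in the relevant interrupt register; intel_get_hpd_pins() further down
 * walks one of these tables to translate a triggered register value back
 * into a bitmask of HPD pins.
 */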

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN3_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

#define GEN2_IRQ_RESET(type) do { \
	I915_WRITE16(type##IMR, 0xffff); \
	POSTING_READ16(type##IMR); \
	I915_WRITE16(type##IER, 0); \
	I915_WRITE16(type##IIR, 0xffff); \
	POSTING_READ16(type##IIR); \
	I915_WRITE16(type##IIR, 0xffff); \
	POSTING_READ16(type##IIR); \
} while (0)
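
/*
 * Note on the double IIR clear in the reset macros above: IIR is
 * write-one-to-clear and, per the "queue up two events" comment, a second
 * event may already be latched behind the visible one. Clearing and
 * posting twice makes sure both the displayed bit and any queued
 * duplicate are gone before interrupts are torn down or re-enabled.
 */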

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u16 val = I915_READ16(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE16(reg, 0xffff);
	POSTING_READ16(reg);
	I915_WRITE16(reg, 0xffff);
	POSTING_READ16(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \
	gen3_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

#define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \
	gen2_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE16(type##IER, (ier_val)); \
	I915_WRITE16(type##IMR, (imr_val)); \
	POSTING_READ16(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid interference between
 * read-modify-write cycles, these bits are protected by a spinlock.
 * Since this function is usually not called from a context where the
 * lock is held already, this function acquires the lock itself. A
 * non-locking version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}
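
/*
 * Illustrative interleaving (not driver code) of the lost update that
 * dev_priv->irq_lock prevents in the helpers above. Assume
 * PORT_HOTPLUG_EN starts at 0:
 *
 *   process context                 interrupt context
 *   val = READ();        // 0
 *                                   val = READ();          // 0
 *                                   WRITE(val | BIT(1));   // reg = BIT(1)
 *   WRITE(val | BIT(0));            // reg = BIT(0), BIT(1) lost
 *
 * Serializing the whole read-modify-write cycle under the spinlock makes
 * both enable bits survive.
 */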

static u32
gen11_gt_engine_identity(struct drm_i915_private * const i915,
			 const unsigned int bank, const unsigned int bit);

static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
				const unsigned int bank,
				const unsigned int bit)
{
	void __iomem * const regs = i915->regs;
	u32 dw;

	lockdep_assert_held(&i915->irq_lock);

	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
	if (dw & BIT(bit)) {
		/*
		 * According to the BSpec, DW_IIR bits cannot be cleared without
		 * first servicing the Selector & Shared IIR registers.
		 */
		gen11_gt_engine_identity(i915, bank, bit);

		/*
		 * We locked GT INT DW by reading it. If we want to (try
		 * to) recover from this successfully, we need to clear
		 * our bit, otherwise we are locking the register for
		 * everybody.
		 */
		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));

		return true;
	}

	return false;
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    uint32_t interrupt_mask,
			    uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
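
/*
 * Worked example (illustrative numbers) of the masking arithmetic above:
 * with irq_mask = 0b1111, interrupt_mask = 0b0110 and
 * enabled_irq_mask = 0b0010:
 *
 *   new_val = (0b1111 & ~0b0110)   // 0b1001, untouched bits kept
 *           | (~0b0010 &  0b0110)  // 0b0100, bit 2 stays masked
 *           = 0b1101
 *
 * i.e. bit 1 is unmasked (interrupt enabled) and bit 2 masked, while bits
 * outside interrupt_mask are left alone. The same pattern recurs in the
 * other *_update_*_irq helpers below.
 */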

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
	POSTING_READ_FW(GTIMR);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);

	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 11)
		return GEN11_GPM_WGBOXPERF_INTR_MASK;
	else if (INTEL_GEN(dev_priv) >= 8)
		return GEN8_GT_IMR(2);
	else
		return GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 11)
		return GEN11_GPM_WGBOXPERF_INTR_ENABLE;
	else if (INTEL_GEN(dev_priv) >= 8)
		return GEN8_GT_IER(2);
	else
		return GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	new_val = dev_priv->pm_imr;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_imr) {
		dev_priv->pm_imr = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_mask_pm_irq(dev_priv, mask);
}

static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	lockdep_assert_held(&dev_priv->irq_lock);

	I915_WRITE(reg, reset_mask);
	I915_WRITE(reg, reset_mask);
	POSTING_READ(reg);
}

static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier |= enable_mask;
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	gen6_unmask_pm_irq(dev_priv, enable_mask);
	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}

static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier &= ~disable_mask;
	__gen6_mask_pm_irq(dev_priv, disable_mask);
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	/* though a barrier is missing here, we don't really need one */
}

void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);

	while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
		;

	dev_priv->gt_pm.rps.pm_iir = 0;

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, GEN6_PM_RPS_EVENTS);
	dev_priv->gt_pm.rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (READ_ONCE(rps->interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON_ONCE(rps->pm_iir);

	if (INTEL_GEN(dev_priv) >= 11)
		WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM));
	else
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);

	rps->interrupts_enabled = true;
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (!READ_ONCE(rps->interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	rps->interrupts_enabled = false;

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));

	gen6_disable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	/* Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&rps->work);
	if (INTEL_GEN(dev_priv) >= 11)
		gen11_reset_rps_interrupts(dev_priv);
	else
		gen6_reset_rps_interrupts(dev_priv);
}
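
/*
 * The teardown ordering in gen6_disable_rps_interrupts() above matters:
 * clear interrupts_enabled and mask the source under the lock, then
 * synchronize_irq() to drain any handler already in flight, and only then
 * cancel_work_sync() - otherwise a still-running handler could requeue
 * rps->work just after it was cancelled.
 */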

void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
{
	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (!dev_priv->guc.interrupts_enabled) {
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
				       dev_priv->pm_guc_events);
		dev_priv->guc.interrupts_enabled = true;
		gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
	}
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->guc.interrupts_enabled = false;

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	gen9_reset_guc_interrupts(dev_priv);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (INTEL_GEN(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		  status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		  pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}
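
/*
 * Layout note for the helper above: PIPESTAT packs the status bits in the
 * low 16 bits of the register and the matching enable bits 16 positions
 * higher, hence enable_mask starting life as status_mask << 16 before the
 * VLV/CHV special cases carve out their exceptions.
 */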

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode;
	unsigned long irqflags;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ_FW(low_frame);
		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
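
/*
 * Worked example (illustrative numbers) for the counter above: with
 * htotal = 800, hsync_start = 656 and vblank_start = 600, the threshold is
 * vbl_start = 600 * 800 - (800 - 656) = 479856 pixels. If the hardware
 * reports frame 0x000123 with a pixel counter of 480000, we are already
 * past the start of vblank, so the cooked counter reads 0x124 even though
 * the raw frame counter only increments at the start of active video.
 */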

static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}
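
/*
 * g4x and later have a dedicated hardware frame counter which, per the
 * timing diagram above, already increments at the start of vblank (ctg+),
 * so no pixel-counter correction is needed here, unlike in
 * i915_get_vblank_counter().
 */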

/*
 * On certain encoders on certain platforms, the pipe
 * scanline register will not work to get the scanline,
 * either because the timings are driven from the PORT or
 * because of issues with scanline register updates.
 * This function will use the Framestamp and current
 * timestamp registers to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);

		scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
					clock), 1000 * htotal);
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}

/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return -1;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev_priv))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank.  So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem.  We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				     bool in_vblank_irq, int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
								pipe);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}
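
/*
 * Sign convention example (illustrative numbers) for the returned
 * position: with vbl_start = 600, vbl_end = 620 and vtotal = 620, a
 * scanline of 610 yields 610 - 620 = -10 (inside vblank, counting up
 * towards 0 at vbl_end), while a scanline of 100 yields
 * 100 + 620 - 620 = 100 (outside vblank, counting up since vbl_end).
 */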

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev_priv, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);
}
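
/*
 * Note the inverted sense of the ILK "delay" values above: a smaller
 * delay means a higher frequency, so ips.max_delay is the numerically
 * smallest value and ips.min_delay the largest. That is why the busy_up
 * branch decrements new_delay and the busy_down branch increments it.
 */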

static void notify_ring(struct intel_engine_cs *engine)
{
	const u32 seqno = intel_engine_get_seqno(engine);
	struct i915_request *rq = NULL;
	struct task_struct *tsk = NULL;
	struct intel_wait *wait;

	if (unlikely(!engine->breadcrumbs.irq_armed))
		return;

	rcu_read_lock();

	spin_lock(&engine->breadcrumbs.irq_lock);
	wait = engine->breadcrumbs.irq_wait;
	if (wait) {
		/*
		 * We use a callback from the dma-fence to submit
		 * requests after waiting on our own requests. To
		 * ensure minimum delay in queuing the next request to
		 * hardware, signal the fence now rather than wait for
		 * the signaler to be woken up. We still wake up the
		 * waiter in order to handle the irq-seqno coherency
		 * issues (we may receive the interrupt before the
		 * seqno is written, see __i915_request_irq_complete())
		 * and to handle coalescing of multiple seqno updates
		 * and many waiters.
		 */
		if (i915_seqno_passed(seqno, wait->seqno)) {
			struct i915_request *waiter = wait->request;

			if (waiter &&
			    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				      &waiter->fence.flags) &&
			    intel_wait_check_request(wait, waiter))
				rq = i915_request_get(waiter);

			tsk = wait->tsk;
		} else {
			if (engine->irq_seqno_barrier &&
			    i915_seqno_passed(seqno, wait->seqno - 1)) {
				set_bit(ENGINE_IRQ_BREADCRUMB,
					&engine->irq_posted);
				tsk = wait->tsk;
			}
		}

		engine->breadcrumbs.irq_count++;
	} else {
		if (engine->breadcrumbs.irq_armed)
			__intel_engine_disarm_breadcrumbs(engine);
	}
	spin_unlock(&engine->breadcrumbs.irq_lock);

	if (rq) {
		spin_lock(&rq->lock);
		dma_fence_signal_locked(&rq->fence);
		GEM_BUG_ON(!i915_request_completed(rq));
		spin_unlock(&rq->lock);

		i915_request_put(rq);
	}

	if (tsk && tsk->state & TASK_NORMAL)
		wake_up_process(tsk);

	rcu_read_unlock();

	trace_intel_engine_notify(engine, wait);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->ktime = ktime_get_raw();
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	const struct intel_rps_ei *prev = &rps->ei;
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);

	if (prev->ktime) {
		u64 time, c0;
		u32 render, media;

		time = ktime_us_delta(now.ktime, prev->ktime);

		time *= dev_priv->czclk_freq;

		/* Workload can be split between render + media,
		 * e.g. SwapBuffers being blitted in X after being rendered in
		 * mesa. To account for this we need to combine both engines
		 * into our activity counter.
		 */
		render = now.render_c0 - prev->render_c0;
		media = now.media_c0 - prev->media_c0;
		c0 = max(render, media);
		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */

		if (c0 > time * rps->power.up_threshold)
			events = GEN6_PM_RP_UP_THRESHOLD;
		else if (c0 < time * rps->power.down_threshold)
			events = GEN6_PM_RP_DOWN_THRESHOLD;
	}

	rps->ei = now;
	return events;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, gt_pm.rps.work);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	bool client_boost = false;
	int new_delay, adj, min, max;
	u32 pm_iir = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	if (rps->interrupts_enabled) {
		pm_iir = fetch_and_zero(&rps->pm_iir);
		client_boost = atomic_read(&rps->num_waiters);
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		goto out;

	mutex_lock(&dev_priv->pcu_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = rps->last_adj;
	new_delay = rps->cur_freq;
	min = rps->min_freq_softlimit;
	max = rps->max_freq_softlimit;
	if (client_boost)
		max = rps->max_freq;
	if (client_boost && new_delay < rps->boost_freq) {
		new_delay = rps->boost_freq;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;

		if (new_delay >= rps->max_freq_softlimit)
			adj = 0;
	} else if (client_boost) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (rps->cur_freq > rps->efficient_freq)
			new_delay = rps->efficient_freq;
		else if (rps->cur_freq > rps->min_freq_softlimit)
			new_delay = rps->min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;

		if (new_delay <= rps->min_freq_softlimit)
			adj = 0;
	} else { /* unknown event */
		adj = 0;
	}

	rps->last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	if (intel_set_rps(dev_priv, new_delay)) {
		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
		rps->last_adj = 0;
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	spin_lock_irq(&dev_priv->irq_lock);
	if (rps->interrupts_enabled)
		gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}
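
/*
 * Illustrative behaviour of the adjustment ladder above: consecutive
 * UP_THRESHOLD events grow last_adj as +1, +2, +4, ... (+2, +4, +8 on
 * CHV, which needs even encodings), so the frequency converges quickly,
 * while a boost, a down event or an unknown event resets the ladder to 0.
 * The result is clamped to the computed [min, max] range before being
 * applied via intel_set_rps().
 */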


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
					       u32 iir)
{
	if (!HAS_L3_DPF(dev_priv))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev_priv);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(dev_priv->engine[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev_priv->engine[VCS]);
}

static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(dev_priv->engine[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev_priv->engine[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev_priv->engine[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev_priv))
		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
}

static void
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
{
	bool tasklet = false;

	if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
		tasklet = true;

	if (iir & GT_RENDER_USER_INTERRUPT) {
		notify_ring(engine);
		tasklet |= USES_GUC_SUBMISSION(engine->i915);
	}

	if (tasklet)
		tasklet_hi_schedule(&engine->execlists.tasklet);
}

static void gen8_gt_irq_ack(struct drm_i915_private *i915,
			    u32 master_ctl, u32 gt_iir[4])
{
	void __iomem * const regs = i915->regs;

#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
		      GEN8_GT_BCS_IRQ | \
		      GEN8_GT_VCS1_IRQ | \
		      GEN8_GT_VCS2_IRQ | \
		      GEN8_GT_VECS_IRQ | \
		      GEN8_GT_PM_IRQ | \
		      GEN8_GT_GUC_IRQ)

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
		if (likely(gt_iir[0]))
			raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
		if (likely(gt_iir[1]))
			raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
		if (likely(gt_iir[2]))
			raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]);
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
		if (likely(gt_iir[3]))
			raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
	}
}

static void gen8_gt_irq_handler(struct drm_i915_private *i915,
				u32 master_ctl, u32 gt_iir[4])
{
	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gen8_cs_irq_handler(i915->engine[RCS],
				    gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
		gen8_cs_irq_handler(i915->engine[BCS],
				    gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		gen8_cs_irq_handler(i915->engine[VCS],
				    gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
		gen8_cs_irq_handler(i915->engine[VCS2],
				    gt_iir[1] >> GEN8_VCS2_IRQ_SHIFT);
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gen8_cs_irq_handler(i915->engine[VECS],
				    gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		gen6_rps_irq_handler(i915, gt_iir[2]);
		gen9_guc_irq_handler(i915, gt_iir[2]);
	}
}
1576
static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1577
{
1578 1579
	switch (pin) {
	case HPD_PORT_C:
1580
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
1581
	case HPD_PORT_D:
1582
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
1583
	case HPD_PORT_E:
1584
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
1585
	case HPD_PORT_F:
1586 1587 1588 1589 1590 1591
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
	default:
		return false;
	}
}

static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & ICP_DDIA_HPD_LONG_DETECT;
	case HPD_PORT_B:
		return val & ICP_DDIB_HPD_LONG_DETECT;
	default:
		return false;
	}
}

static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_C:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
	case HPD_PORT_D:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
	case HPD_PORT_E:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
	case HPD_PORT_F:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
			       u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
{
	enum hpd_pin pin;

	for_each_hpd_pin(pin) {
		if ((hpd[pin] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(pin);

		if (long_pulse_detect(pin, dig_hotplug_reg))
			*long_mask |= BIT(pin);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}
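
/*
 * A sketch of the expected call pattern (cf. the SPT handler further
 * down): zero both masks once, accumulate over each trigger/control
 * register pair, then report the result:
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
 *			   hotplug_trigger, dig_hotplug_reg, hpd_spt,
 *			   spt_port_hotplug_long_detect);
 *	if (pin_mask)
 *		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 */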

static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

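/*
 * Note: DP AUX completion waiters reuse the same gmbus_wait_queue as
 * GMBUS waiters, so the identical body below is intentional rather than
 * a copy/paste slip.
 */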
static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	uint32_t crcs[5];

	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	crcs[0] = crc0;
	crcs[1] = crc1;
	crcs[2] = crc2;
	crcs[3] = crc3;
	crcs[4] = crc4;
	drm_crtc_add_crc_entry(&crtc->base, true,
				drm_crtc_accurate_vblank_count(&crtc->base),
				crcs);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif

static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	uint32_t res1, res2;

	if (INTEL_GEN(dev_priv) >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (rps->interrupts_enabled) {
			rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
			schedule_work(&rps->work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_GEN(dev_priv) >= 8)
		return;

	if (HAS_VEBOX(dev_priv)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->engine[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
{
	if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT)
		intel_guc_to_host_event_handler(&dev_priv->guc);
}

static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(PIPESTAT(pipe),
			   PIPESTAT_INT_STATUS_MASK |
			   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}

static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	int pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			status_mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(pipe);
		pipe_stats[pipe] = I915_READ(reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
		 */
		if (pipe_stats[pipe]) {
			I915_WRITE(reg, pipe_stats[pipe]);
			I915_WRITE(reg, enable_mask);
		}
	}
	spin_unlock(&dev_priv->irq_lock);
}

static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}
}

static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);
}

static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = 0, hotplug_status_mask;
	int i;

	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
	else
		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;

	/*
	 * We absolutely have to clear all the pending interrupt
	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
	 * interrupt bit won't have an edge, and the i965/g4x
	 * edge triggered IIR will not notice that an interrupt
	 * is still pending. We can't use PORT_HOTPLUG_EN to
	 * guarantee the edge as the act of toggling the enable
	 * bits can itself generate a new hotplug interrupt :(
	 */
	for (i = 0; i < 10; i++) {
		u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;

		if (tmp == 0)
			return hotplug_status;

		hotplug_status |= tmp;
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
	}

	WARN_ONCE(1,
		  "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
		  I915_READ(PORT_HOTPLUG_STAT));

	return hotplug_status;
}

static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (hotplug_trigger) {
			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
					   hotplug_trigger, hotplug_trigger,
					   hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev_priv);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (hotplug_trigger) {
			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
					   hotplug_trigger, hotplug_trigger,
					   hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);
		iir = I915_READ(VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		I915_WRITE(VLV_MASTER_IER, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		I915_WRITE(VLV_IER, ier);
		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

		if (gt_iir)
			snb_gt_irq_handler(dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 master_ctl, iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 gt_iir[4];
		u32 ier = 0;

		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		I915_WRITE(GEN8_MASTER_IRQ, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		I915_WRITE(VLV_IER, ier);
		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);

		gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/*
	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
	 * unless we touch the hotplug register, even if hotplug_trigger is
	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
	 * errors.
	 */
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	if (!hotplug_trigger) {
		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
			PORTD_HOTPLUG_STATUS_MASK |
			PORTC_HOTPLUG_STATUS_MASK |
			PORTB_HOTPLUG_STATUS_MASK;
		dig_hotplug_reg &= ~mask;
	}

	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
	if (!hotplug_trigger)
		return;

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   pch_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
}

static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev_priv))
				ivb_pipe_crc_irq_handler(dev_priv, pipe);
			else
				hsw_pipe_crc_irq_handler(dev_priv, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
	u32 serr_int = I915_READ(SERR_INT);
	enum pipe pipe;

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	for_each_pipe(dev_priv, pipe)
		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev_priv);
}

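/*
 * ICP splits hotplug detection across two register sets: SHOTPLUG_CTL_DDI
 * for the combo (DDI) ports and SHOTPLUG_CTL_TC for the Type-C ports.
 * Both are accumulated into a single pin/long mask before being handed
 * to intel_hpd_irq_handler().
 */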
static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
	u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
	u32 pin_mask = 0, long_mask = 0;

	if (ddi_hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
		I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   ddi_hotplug_trigger,
				   dig_hotplug_reg, hpd_icp,
				   icp_ddi_port_hotplug_long_detect);
	}

	if (tc_hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
		I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   tc_hotplug_trigger,
				   dig_hotplug_reg, hpd_icp,
				   icp_tc_port_hotplug_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_ICP)
		gmbus_irq_handler(dev_priv);
}

static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
	u32 pin_mask = 0, long_mask = 0;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug_trigger, dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug_long_detect);
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug2_trigger, dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug2_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);
}

static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   ilk_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev_priv);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev_priv))
			cpt_irq_handler(dev_priv, pch_iir);
		else
			ibx_irq_handler(dev_priv, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev_priv);
}

static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev_priv);

	if (de_iir & DE_EDP_PSR_INT_HSW) {
		u32 psr_iir = I915_READ(EDP_PSR_IIR);

		intel_psr_irq_handler(dev_priv, psr_iir);
		I915_WRITE(EDP_PSR_IIR, psr_iir);
	}

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev_priv);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
			drm_handle_vblank(&dev_priv->drm, pipe);
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev_priv, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev_priv)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
		if (INTEL_GEN(dev_priv) >= 6)
			snb_gt_irq_handler(dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev_priv, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
		if (INTEL_GEN(dev_priv) >= 7)
			ivb_display_irq_handler(dev_priv, de_iir);
		else
			ilk_display_irq_handler(dev_priv, de_iir);
	}

	if (INTEL_GEN(dev_priv) >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, pm_iir);
		}
	}

	I915_WRITE(DEIER, de_ier);
	if (!HAS_PCH_NOP(dev_priv))
		I915_WRITE(SDEIER, sde_ier);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   bxt_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
	u32 pin_mask = 0, long_mask = 0;
	u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
	u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;

	if (trigger_tc) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
		I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
				   dig_hotplug_reg, hpd_gen11,
				   gen11_port_hotplug_long_detect);
	}

	if (trigger_tbt) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
		I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
				   dig_hotplug_reg, hpd_gen11,
				   gen11_port_hotplug_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
	else
		DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
}

static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;
	u32 iir;
	enum pipe pipe;

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = I915_READ(GEN8_DE_MISC_IIR);
		if (iir) {
			bool found = false;

			I915_WRITE(GEN8_DE_MISC_IIR, iir);
			ret = IRQ_HANDLED;

			if (iir & GEN8_DE_MISC_GSE) {
				intel_opregion_asle_intr(dev_priv);
				found = true;
			}

			if (iir & GEN8_DE_EDP_PSR) {
				u32 psr_iir = I915_READ(EDP_PSR_IIR);

				intel_psr_irq_handler(dev_priv, psr_iir);
				I915_WRITE(EDP_PSR_IIR, psr_iir);
				found = true;
			}

			if (!found)
				DRM_ERROR("Unexpected DE Misc interrupt\n");
		} else {
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
		}
	}

	if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
		iir = I915_READ(GEN11_DE_HPD_IIR);
		if (iir) {
			I915_WRITE(GEN11_DE_HPD_IIR, iir);
			ret = IRQ_HANDLED;
			gen11_hpd_irq_handler(dev_priv, iir);
		} else {
			DRM_ERROR("The master control interrupt lied (DE HPD)!\n");
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = I915_READ(GEN8_DE_PORT_IIR);
		if (iir) {
			u32 tmp_mask;
			bool found = false;

			I915_WRITE(GEN8_DE_PORT_IIR, iir);
			ret = IRQ_HANDLED;

			tmp_mask = GEN8_AUX_CHANNEL_A;
			if (INTEL_GEN(dev_priv) >= 9)
				tmp_mask |= GEN9_AUX_CHANNEL_B |
					    GEN9_AUX_CHANNEL_C |
					    GEN9_AUX_CHANNEL_D;

			if (INTEL_GEN(dev_priv) >= 11)
				tmp_mask |= ICL_AUX_CHANNEL_E;

			if (IS_CNL_WITH_PORT_F(dev_priv) ||
			    INTEL_GEN(dev_priv) >= 11)
				tmp_mask |= CNL_AUX_CHANNEL_F;

			if (iir & tmp_mask) {
				dp_aux_irq_handler(dev_priv);
				found = true;
			}

			if (IS_GEN9_LP(dev_priv)) {
				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
				if (tmp_mask) {
					bxt_hpd_irq_handler(dev_priv, tmp_mask,
							    hpd_bxt);
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
				if (tmp_mask) {
					ilk_hpd_irq_handler(dev_priv,
							    tmp_mask, hpd_bdw);
					found = true;
				}
			}

			if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
				gmbus_irq_handler(dev_priv);
				found = true;
			}

			if (!found)
				DRM_ERROR("Unexpected DE Port interrupt\n");
		} else {
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
		}
	}

	for_each_pipe(dev_priv, pipe) {
		u32 fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
			continue;
		}

		ret = IRQ_HANDLED;
		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev_priv, pipe);

		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		fault_errors = iir;
		if (INTEL_GEN(dev_priv) >= 9)
			fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		else
			fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

		if (fault_errors)
			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
				  pipe_name(pipe),
				  fault_errors);
	}

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		iir = I915_READ(SDEIIR);
		if (iir) {
			I915_WRITE(SDEIIR, iir);
			ret = IRQ_HANDLED;

			if (HAS_PCH_ICP(dev_priv))
				icp_irq_handler(dev_priv, iir);
			else if (HAS_PCH_SPT(dev_priv) ||
				 HAS_PCH_KBP(dev_priv) ||
				 HAS_PCH_CNP(dev_priv))
				spt_irq_handler(dev_priv, iir);
			else
				cpt_irq_handler(dev_priv, iir);
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
		}
	}

	return ret;
}

static inline u32 gen8_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN8_MASTER_IRQ);
}

static inline void gen8_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
}

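/*
 * gen8_irq_handler() brackets all IIR processing between
 * gen8_master_intr_disable() and gen8_master_intr_enable() above, so any
 * level indication that latches while the master bit is off re-raises
 * the interrupt once the master bit is restored.
 */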
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = to_i915(arg);
	void __iomem * const regs = dev_priv->regs;
	u32 master_ctl;
	u32 gt_iir[4];

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	master_ctl = gen8_master_intr_disable(regs);
	if (!master_ctl) {
		gen8_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, clear, then process each source of interrupt */
	gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & ~GEN8_GT_IRQS) {
		disable_rpm_wakeref_asserts(dev_priv);
		gen8_de_irq_handler(dev_priv, master_ctl);
		enable_rpm_wakeref_asserts(dev_priv);
	}

	gen8_master_intr_enable(regs);

	gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);

	return IRQ_HANDLED;
}

struct wedge_me {
	struct delayed_work work;
	struct drm_i915_private *i915;
	const char *name;
};

static void wedge_me(struct work_struct *work)
{
	struct wedge_me *w = container_of(work, typeof(*w), work.work);

	dev_err(w->i915->drm.dev,
		"%s timed out, cancelling all in-flight rendering.\n",
		w->name);
	i915_gem_set_wedged(w->i915);
}

static void __init_wedge(struct wedge_me *w,
			 struct drm_i915_private *i915,
			 long timeout,
			 const char *name)
{
	w->i915 = i915;
	w->name = name;

	INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
	schedule_delayed_work(&w->work, timeout);
}

static void __fini_wedge(struct wedge_me *w)
{
	cancel_delayed_work_sync(&w->work);
	destroy_delayed_work_on_stack(&w->work);
	w->i915 = NULL;
}

#define i915_wedge_on_timeout(W, DEV, TIMEOUT)				\
	for (__init_wedge((W), (DEV), (TIMEOUT), __func__);		\
	     (W)->i915;							\
	     __fini_wedge((W)))

static u32
gen11_gt_engine_identity(struct drm_i915_private * const i915,
			 const unsigned int bank, const unsigned int bit)
{
	void __iomem * const regs = i915->regs;
	u32 timeout_ts;
	u32 ident;

	lockdep_assert_held(&i915->irq_lock);

	raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));

	/*
	 * NB: Specs do not specify how long to spin wait,
	 * so we do ~100us as an educated guess.
	 */
	timeout_ts = (local_clock() >> 10) + 100;
	do {
		ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
	} while (!(ident & GEN11_INTR_DATA_VALID) &&
		 !time_after32(local_clock() >> 10, timeout_ts));

	if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
		DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
			  bank, bit, ident);
		return 0;
	}

	raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
		      GEN11_INTR_DATA_VALID);

	return ident;
}

static void
gen11_other_irq_handler(struct drm_i915_private * const i915,
			const u8 instance, const u16 iir)
{
	if (instance == OTHER_GTPM_INSTANCE)
		return gen6_rps_irq_handler(i915, iir);

	WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
		  instance, iir);
}

static void
gen11_engine_irq_handler(struct drm_i915_private * const i915,
			 const u8 class, const u8 instance, const u16 iir)
{
	struct intel_engine_cs *engine;

	if (instance <= MAX_ENGINE_INSTANCE)
		engine = i915->engine_class[class][instance];
	else
		engine = NULL;

	if (likely(engine))
		return gen8_cs_irq_handler(engine, iir);

	WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
		  class, instance);
}

static void
gen11_gt_identity_handler(struct drm_i915_private * const i915,
			  const u32 identity)
{
	const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
	const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
	const u16 intr = GEN11_INTR_ENGINE_INTR(identity);

	if (unlikely(!intr))
		return;

	if (class <= COPY_ENGINE_CLASS)
		return gen11_engine_irq_handler(i915, class, instance, intr);

	if (class == OTHER_CLASS)
		return gen11_other_irq_handler(i915, instance, intr);

	WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
		  class, instance, intr);
}

static void
gen11_gt_bank_handler(struct drm_i915_private * const i915,
		      const unsigned int bank)
{
	void __iomem * const regs = i915->regs;
	unsigned long intr_dw;
	unsigned int bit;

	lockdep_assert_held(&i915->irq_lock);

	intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));

	if (unlikely(!intr_dw)) {
		DRM_ERROR("GT_INTR_DW%u blank!\n", bank);
		return;
	}

	for_each_set_bit(bit, &intr_dw, 32) {
		const u32 ident = gen11_gt_engine_identity(i915,
							   bank, bit);

		gen11_gt_identity_handler(i915, ident);
	}

	/* Clear must be after shared has been served for engine */
	raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
}

static void
gen11_gt_irq_handler(struct drm_i915_private * const i915,
		     const u32 master_ctl)
{
	unsigned int bank;

	spin_lock(&i915->irq_lock);

	for (bank = 0; bank < 2; bank++) {
		if (master_ctl & GEN11_GT_DW_IRQ(bank))
			gen11_gt_bank_handler(i915, bank);
	}

	spin_unlock(&i915->irq_lock);
}

static u32
gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
{
	void __iomem * const regs = dev_priv->regs;
	u32 iir;

	if (!(master_ctl & GEN11_GU_MISC_IRQ))
		return 0;

	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
	if (likely(iir))
		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);

	return iir;
}

static void
gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
{
	if (iir & GEN11_GU_MISC_GSE)
		intel_opregion_asle_intr(dev_priv);
}

static inline u32 gen11_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
}

static inline void gen11_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}

static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
	struct drm_i915_private * const i915 = to_i915(arg);
	void __iomem * const regs = i915->regs;
	u32 master_ctl;
	u32 gu_misc_iir;

	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_ctl = gen11_master_intr_disable(regs);
	if (!master_ctl) {
		gen11_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, clear, then process each source of interrupt. */
	gen11_gt_irq_handler(i915, master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & GEN11_DISPLAY_IRQ) {
		const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);

		disable_rpm_wakeref_asserts(i915);
		/*
		 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
		 * for the display related bits.
		 */
		gen8_de_irq_handler(i915, disp_ctl);
		enable_rpm_wakeref_asserts(i915);
	}

	gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);

	gen11_master_intr_enable(regs);

	gen11_gu_misc_irq_handler(i915, gu_misc_iir);

	return IRQ_HANDLED;
}

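/*
 * i915_reset_device - perform a full chip reset under a watchdog.
 *
 * Notifies userspace via uevents before and after the reset, hands the
 * reset off to any waiter of I915_RESET_HANDOFF while retrying
 * struct_mutex itself, and relies on i915_wedge_on_timeout() to wedge
 * the GPU if the whole sequence has not completed within 5 seconds.
 */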
static void i915_reset_device(struct drm_i915_private *dev_priv,
			      u32 engine_mask,
			      const char *reason)
{
	struct i915_gpu_error *error = &dev_priv->gpu_error;
	struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	struct wedge_me w;

	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);

	DRM_DEBUG_DRIVER("resetting chip\n");
	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);

	/* Use a watchdog to ensure that our reset completes */
	i915_wedge_on_timeout(&w, dev_priv, 5*HZ) {
		intel_prepare_reset(dev_priv);

		error->reason = reason;
		error->stalled_mask = engine_mask;

		/* Signal that locked waiters should reset the GPU */
		smp_mb__before_atomic();
		set_bit(I915_RESET_HANDOFF, &error->flags);
		wake_up_all(&error->wait_queue);

		/* Wait for anyone holding the lock to wakeup, without
		 * blocking indefinitely on struct_mutex.
		 */
		do {
			if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
				i915_reset(dev_priv, engine_mask, reason);
				mutex_unlock(&dev_priv->drm.struct_mutex);
			}
		} while (wait_on_bit_timeout(&error->flags,
					     I915_RESET_HANDOFF,
					     TASK_UNINTERRUPTIBLE,
					     1));

		error->stalled_mask = 0;
		error->reason = NULL;

		intel_finish_reset(dev_priv);
	}

	if (!test_bit(I915_WEDGED, &error->flags))
		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
}

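/*
 * Clear the sticky error state (PGTBL_ER, IPEIR, EIR and the per-engine
 * ring fault registers) left behind by a GPU error so that it cannot
 * immediately retrigger; anything still stuck in EIR after the write-back
 * is masked off via EMR.
 */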
void i915_clear_error_registers(struct drm_i915_private *dev_priv)
{
	u32 eir;

	if (!IS_GEN2(dev_priv))
		I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));

	if (INTEL_GEN(dev_priv) < 4)
		I915_WRITE(IPEIR, I915_READ(IPEIR));
	else
		I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));

	I915_WRITE(EIR, I915_READ(EIR));
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT);
	}

	if (INTEL_GEN(dev_priv) >= 8) {
		I915_WRITE(GEN8_RING_FAULT_REG,
			   I915_READ(GEN8_RING_FAULT_REG) & ~RING_FAULT_VALID);
		POSTING_READ(GEN8_RING_FAULT_REG);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		for_each_engine(engine, dev_priv, id) {
			I915_WRITE(RING_FAULT_REG(engine),
				   I915_READ(RING_FAULT_REG(engine)) &
				   ~RING_FAULT_VALID);
		}
		POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
	}
}

/**
 * i915_handle_error - handle a gpu error
 * @dev_priv: i915 device private
 * @engine_mask: mask representing engines that are hung
 * @flags: control flags
 * @fmt: Error message format string
 *
 * Do some basic checking of register state at error time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_i915_private *dev_priv,
		       u32 engine_mask,
		       unsigned long flags,
		       const char *fmt, ...)
{
	struct intel_engine_cs *engine;
	unsigned int tmp;
	char error_msg[80];
	char *msg = NULL;

	if (fmt) {
		va_list args;

		va_start(args, fmt);
		vscnprintf(error_msg, sizeof(error_msg), fmt, args);
		va_end(args);

		msg = error_msg;
	}

	/*
	 * In most cases it's guaranteed that we get here with an RPM
	 * reference held, for example because there is a pending GPU
	 * request that won't finish until the reset is done. This
	 * isn't the case at least when we get here by doing a
	 * simulated reset via debugfs, so get an RPM reference.
	 */
	intel_runtime_pm_get(dev_priv);

	engine_mask &= INTEL_INFO(dev_priv)->ring_mask;

	if (flags & I915_ERROR_CAPTURE) {
		i915_capture_error_state(dev_priv, engine_mask, msg);
		i915_clear_error_registers(dev_priv);
	}

	/*
	 * Try engine reset when available. We fall back to full reset if
	 * single reset fails.
	 */
	if (intel_has_reset_engine(dev_priv) &&
	    !i915_terminally_wedged(&dev_priv->gpu_error)) {
		for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
			BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
			if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
					     &dev_priv->gpu_error.flags))
				continue;

			if (i915_reset_engine(engine, msg) == 0)
				engine_mask &= ~intel_engine_flag(engine);

			clear_bit(I915_RESET_ENGINE + engine->id,
				  &dev_priv->gpu_error.flags);
			wake_up_bit(&dev_priv->gpu_error.flags,
				    I915_RESET_ENGINE + engine->id);
		}
	}

	if (!engine_mask)
		goto out;

	/* Full reset needs the mutex, stop any other user trying to do so. */
	if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) {
		wait_event(dev_priv->gpu_error.reset_queue,
			   !test_bit(I915_RESET_BACKOFF,
				     &dev_priv->gpu_error.flags));
		goto out;
	}

	/* Prevent any other reset-engine attempt. */
	for_each_engine(engine, dev_priv, tmp) {
		while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
					&dev_priv->gpu_error.flags))
			wait_on_bit(&dev_priv->gpu_error.flags,
				    I915_RESET_ENGINE + engine->id,
				    TASK_UNINTERRUPTIBLE);
	}

	i915_reset_device(dev_priv, engine_mask, msg);

	for_each_engine(engine, dev_priv, tmp) {
		clear_bit(I915_RESET_ENGINE + engine->id,
			  &dev_priv->gpu_error.flags);
	}

	clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags);
	wake_up_all(&dev_priv->gpu_error.reset_queue);

out:
	intel_runtime_pm_put(dev_priv);
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;
	uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* Even though there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated.
	 */
	if (HAS_PSR(dev_priv))
		drm_vblank_restore(dev, pipe);

	return 0;
}

static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* Even if there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated, so check only for PSR.
	 */
	if (HAS_PSR(dev_priv))
		drm_vblank_restore(dev, pipe);

	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;
	uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

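/*
 * Mask and clear all south display engine (PCH) interrupts. SERR_INT is
 * a separate sticky error register on CPT/LPT and needs an explicit clear.
 */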
static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_NOP(dev_priv))
		return;

	GEN3_IRQ_RESET(SDE);

	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		I915_WRITE(SERR_INT, 0xffffffff);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * This function needs to be called before interrupts are enabled.
 */
static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (HAS_PCH_NOP(dev_priv))
		return;

	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}

static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	GEN3_IRQ_RESET(GT);
	if (INTEL_GEN(dev_priv) >= 6)
		GEN3_IRQ_RESET(GEN6_PM);
}

static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	if (IS_CHERRYVIEW(dev_priv))
		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);

	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(VLV_);
	dev_priv->irq_mask = ~0u;
}

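/*
 * Note: on VLV/CHV the GMBUS interrupt status is reported through
 * pipe A's pipestat register, hence the extra enable below.
 */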
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 enable_mask;
	enum pipe pipe;

	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_LPE_PIPE_A_INTERRUPT |
		I915_LPE_PIPE_B_INTERRUPT;

	if (IS_CHERRYVIEW(dev_priv))
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
			I915_LPE_PIPE_C_INTERRUPT;

	WARN_ON(dev_priv->irq_mask != ~0u);

	dev_priv->irq_mask = ~enable_mask;

	GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
}

/* drm_dma.h hooks
*/
static void ironlake_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (IS_GEN5(dev_priv))
		I915_WRITE(HWSTAM, 0xffffffff);

	GEN3_IRQ_RESET(DE);
	if (IS_GEN7(dev_priv))
		I915_WRITE(GEN7_ERR_INT, 0xffffffff);

	if (IS_HASWELL(dev_priv)) {
		I915_WRITE(EDP_PSR_IMR, 0xffffffff);
		I915_WRITE(EDP_PSR_IIR, 0xffffffff);
	}

	gen5_gt_irq_reset(dev_priv);

	ibx_irq_reset(dev_priv);
}

static void valleyview_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(VLV_MASTER_IER, 0);
	POSTING_READ(VLV_MASTER_IER);

	gen5_gt_irq_reset(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

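/*
 * The four GT interrupt banks on gen8: 0 = render/blitter, 1 = VCS1/VCS2,
 * 2 = PM (and GuC events), 3 = VECS.
 */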
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	GEN8_IRQ_RESET_NDX(GT, 0);
	GEN8_IRQ_RESET_NDX(GT, 1);
	GEN8_IRQ_RESET_NDX(GT, 2);
	GEN8_IRQ_RESET_NDX(GT, 3);
}

static void gen8_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	gen8_master_intr_disable(dev_priv->regs);

	gen8_gt_irq_reset(dev_priv);

	I915_WRITE(EDP_PSR_IMR, 0xffffffff);
	I915_WRITE(EDP_PSR_IIR, 0xffffffff);

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);

	GEN3_IRQ_RESET(GEN8_DE_PORT_);
	GEN3_IRQ_RESET(GEN8_DE_MISC_);
	GEN3_IRQ_RESET(GEN8_PCU_);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_reset(dev_priv);
}

static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	/* Disable RCS, BCS, VCS and VECS class engines. */
	I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0);
	I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE,	  0);

	/* Mask all irqs on RCS, BCS, VCS and VECS class engines. */
	I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK,	~0);
	I915_WRITE(GEN11_BCS_RSVD_INTR_MASK,	~0);
	I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK,	~0);
	I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK,	~0);
	I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK,	~0);

	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
}

static void gen11_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	gen11_master_intr_disable(dev_priv->regs);

	gen11_gt_irq_reset(dev_priv);

	I915_WRITE(GEN11_DISPLAY_INT_CTL, 0);

	I915_WRITE(EDP_PSR_IMR, 0xffffffff);
	I915_WRITE(EDP_PSR_IIR, 0xffffffff);

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);

	GEN3_IRQ_RESET(GEN8_DE_PORT_);
	GEN3_IRQ_RESET(GEN8_DE_MISC_);
	GEN3_IRQ_RESET(GEN11_DE_HPD_);
	GEN3_IRQ_RESET(GEN11_GU_MISC_);
	GEN3_IRQ_RESET(GEN8_PCU_);

	if (HAS_PCH_ICP(dev_priv))
		GEN3_IRQ_RESET(SDE);
}

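/*
 * Per-pipe interrupt registers live in the pipe's power well and lose
 * their contents while the well is off, so re-init them when it comes
 * back up.
 */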
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
				  dev_priv->de_irq_mask[pipe],
				  ~dev_priv->de_irq_mask[pipe] | extra_ier);

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);

	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);
}

static void cherryview_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN3_IRQ_RESET(GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

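/*
 * Translate the currently enabled HPD pins into a platform-specific
 * interrupt bitmask via the given pin-to-bit table.
 */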
static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}

static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on LPT+.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTB_PULSE_DURATION_MASK |
		     PORTC_PULSE_DURATION_MASK |
		     PORTD_PULSE_DURATION_MASK);
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	/*
	 * When CPU and PCH are on the same package, port A
	 * HPD must be enabled in both north and south.
	 */
	if (HAS_PCH_LPT_LP(dev_priv))
		hotplug |= PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	if (HAS_PCH_IBX(dev_priv)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	ibx_hpd_detection_setup(dev_priv);
}

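/*
 * ICP splits hotplug control across two registers: one for the combo-PHY
 * DDI ports and one for the Type-C ports.
 */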
static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = I915_READ(SHOTPLUG_CTL_DDI);
	hotplug |= ICP_DDIA_HPD_ENABLE |
		   ICP_DDIB_HPD_ENABLE;
	I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);

	hotplug = I915_READ(SHOTPLUG_CTL_TC);
	hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) |
		   ICP_TC_HPD_ENABLE(PORT_TC2) |
		   ICP_TC_HPD_ENABLE(PORT_TC3) |
		   ICP_TC_HPD_ENABLE(PORT_TC4);
	I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
}

static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	icp_hpd_detection_setup(dev_priv);
}

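/*
 * Gen11 keeps separate hotplug enables for Type-C ports depending on
 * whether they operate in DP-alternate (TC) or Thunderbolt (TBT) mode,
 * hence the two control registers programmed below.
 */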
static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
	I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);

	hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
	I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
}

static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;
	u32 val;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11);
	hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;

	val = I915_READ(GEN11_DE_HPD_IMR);
	val &= ~hotplug_irqs;
	I915_WRITE(GEN11_DE_HPD_IMR, val);
	POSTING_READ(GEN11_DE_HPD_IMR);

	gen11_hpd_detection_setup(dev_priv);

	if (HAS_PCH_ICP(dev_priv))
		icp_hpd_irq_setup(dev_priv);
}

static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 val, hotplug;

	/* Display WA #1179 WaHardHangonHotPlug: cnp */
	if (HAS_PCH_CNP(dev_priv)) {
		val = I915_READ(SOUTH_CHICKEN1);
		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
		val |= CHASSIS_CLK_REQ_DURATION(0xf);
		I915_WRITE(SOUTH_CHICKEN1, val);
	}

	/* Enable digital hotplug on the PCH */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE |
		   PORTD_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);

	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
	hotplug |= PORTE_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}

static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	spt_hpd_detection_setup(dev_priv);
}

static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 * The pulse duration bits are reserved on HSW+.
	 */
	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
		   DIGITAL_PORTA_PULSE_DURATION_2ms;
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
}

static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	if (INTEL_GEN(dev_priv) >= 8) {
		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);

		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else if (INTEL_GEN(dev_priv) >= 7) {
		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else {
		hotplug_irqs = DE_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	}

	ilk_hpd_detection_setup(dev_priv);

	ibx_hpd_irq_setup(dev_priv);
}

static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
				      u32 enabled_irqs)
{
	u32 hotplug;

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE;

	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
		      hotplug, enabled_irqs);
	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;

	/*
	 * For BXT the invert bit has to be set based on the AOB design
	 * for the HPD detection logic; update it from the VBT fields.
	 */
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
		hotplug |= BXT_DDIA_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
		hotplug |= BXT_DDIB_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
		hotplug |= BXT_DDIC_HPD_INVERT;

	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	__bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
}

static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;

	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);

	__bxt_hpd_detection_setup(dev_priv, enabled_irqs);
}

static void ibx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 mask;

	if (HAS_PCH_NOP(dev_priv))
		return;

	if (HAS_PCH_IBX(dev_priv))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
	else
		mask = SDE_GMBUS_CPT;

	gen3_assert_iir_is_zero(dev_priv, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);

	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
	    HAS_PCH_LPT(dev_priv))
		ibx_hpd_detection_setup(dev_priv);
	else
		spt_hpd_detection_setup(dev_priv);
}

static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev_priv)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
		gt_irqs |= GT_PARITY_ERROR(dev_priv);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev_priv)) {
		gt_irqs |= ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);

	if (INTEL_GEN(dev_priv) >= 6) {
		/*
		 * RPS interrupts will get enabled/disabled on demand when RPS
		 * itself is enabled/disabled.
		 */
		if (HAS_VEBOX(dev_priv)) {
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
			dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
		}

		dev_priv->pm_imr = 0xffffffff;
		GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
	}
}

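/*
 * ILK/SNB and IVB/HSW use different display engine interrupt bit layouts
 * (DE_* vs DE_*_IVB), so the masks below are chosen by gen.
 */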
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 display_mask, extra_mask;

	if (INTEL_GEN(dev_priv) >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_DP_A_HOTPLUG);
	}

	if (IS_HASWELL(dev_priv)) {
		gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
		intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
		display_mask |= DE_EDP_PSR_INT_HSW;
	}

	dev_priv->irq_mask = ~display_mask;

	ibx_irq_pre_postinstall(dev);

	GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);

	gen5_gt_irq_postinstall(dev);

	ilk_hpd_detection_setup(dev_priv);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev_priv)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irq(&dev_priv->irq_lock);
		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}

	return 0;
}

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv)) {
		vlv_display_irq_reset(dev_priv);
		vlv_display_irq_postinstall(dev_priv);
	}
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		vlv_display_irq_reset(dev_priv);
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen5_gt_irq_postinstall(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(VLV_MASTER_IER);

	return 0;
}

static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
		};

	if (HAS_L3_DPF(dev_priv))
		gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	dev_priv->pm_ier = 0x0;
	dev_priv->pm_imr = ~dev_priv->pm_ier;
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled. Same will be the case for GuC interrupts.
	 */
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	uint32_t de_pipe_enables;
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
	enum pipe pipe;

	if (INTEL_GEN(dev_priv) <= 10)
		de_misc_masked |= GEN8_DE_MISC_GSE;

	if (INTEL_GEN(dev_priv) >= 9) {
		de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
				  GEN9_AUX_CHANNEL_D;
		if (IS_GEN9_LP(dev_priv))
			de_port_masked |= BXT_DE_PORT_GMBUS;
	} else {
		de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	}

	if (INTEL_GEN(dev_priv) >= 11)
		de_port_masked |= ICL_AUX_CHANNEL_E;

	if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
		de_port_masked |= CNL_AUX_CHANNEL_F;

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

	de_port_enables = de_port_masked;
	if (IS_GEN9_LP(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;

	gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
	intel_psr_irq_control(dev_priv, dev_priv->psr.debug);

	for_each_pipe(dev_priv, pipe) {
		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;

		if (intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);
	}

	GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);

	if (INTEL_GEN(dev_priv) >= 11) {
		u32 de_hpd_masked = 0;
		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
				     GEN11_DE_TBT_HOTPLUG_MASK;

		GEN3_IRQ_INIT(GEN11_DE_HPD_, ~de_hpd_masked, de_hpd_enables);
		gen11_hpd_detection_setup(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		bxt_hpd_detection_setup(dev_priv);
	} else if (IS_BROADWELL(dev_priv)) {
		ilk_hpd_detection_setup(dev_priv);
	}
}

static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev);

	gen8_master_intr_enable(dev_priv->regs);

	return 0;
}

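/*
 * Gen11 packs two engines into each 32-bit enable/mask register: one
 * engine's bits in the low half, the other's in the high half, hence
 * the "irqs << 16 | irqs" pattern below.
 */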
static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;

	BUILD_BUG_ON(irqs & 0xffff0000);

	/* Enable RCS, BCS, VCS and VECS class interrupts. */
	I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs);
	I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE,	  irqs << 16 | irqs);

	/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
	I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK,	~(irqs << 16));
	I915_WRITE(GEN11_BCS_RSVD_INTR_MASK,	~(irqs << 16));
	I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK,	~(irqs | irqs << 16));
	I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK,	~(irqs | irqs << 16));
	I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK,	~(irqs | irqs << 16));

	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled.
	 */
	dev_priv->pm_ier = 0x0;
	dev_priv->pm_imr = ~dev_priv->pm_ier;
	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
}

static void icp_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 mask = SDE_GMBUS_ICP;

	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);

	gen3_assert_iir_is_zero(dev_priv, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);

	icp_hpd_detection_setup(dev_priv);
}

static int gen11_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	if (HAS_PCH_ICP(dev_priv))
		icp_irq_postinstall(dev);

	gen11_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	GEN3_IRQ_INIT(GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);

	gen11_master_intr_enable(dev_priv->regs);

	return 0;
}

static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen8_gt_irq_postinstall(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static void i8xx_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	i9xx_pipestat_irq_reset(dev_priv);

	I915_WRITE16(HWSTAM, 0xffff);

	GEN2_IRQ_RESET();
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u16 enable_mask;

	I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE |
			    I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded; this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u16 *eir, u16 *eir_stuck)
{
	u16 emr;

	*eir = I915_READ16(EIR);

	if (*eir)
		I915_WRITE16(EIR, *eir);

	*eir_stuck = I915_READ16(EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = I915_READ16(EMR);
	I915_WRITE16(EMR, 0xffff);
	I915_WRITE16(EMR, emr | *eir_stuck);
}

static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u16 eir, u16 eir_stuck)
{
	DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);

	if (eir_stuck)
		DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);
}

static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	*eir = I915_READ(EIR);

	I915_WRITE(EIR, *eir);

	*eir_stuck = I915_READ(EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = I915_READ(EMR);
	I915_WRITE(EMR, 0xffffffff);
	I915_WRITE(EMR, emr | *eir_stuck);
}

static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u32 eir, u32 eir_stuck)
{
	DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);

	if (eir_stuck)
		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck);
}

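/*
 * The do { } while (0) in the legacy handlers below keeps the shape of
 * the loop-based handlers while making only a single pass over IIR.
 */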
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u16 eir = 0, eir_stuck = 0;
		u16 iir;

		iir = I915_READ16(IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		I915_WRITE16(IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);

		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i915_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	i9xx_pipestat_irq_reset(dev_priv);

	I915_WRITE(HWSTAM, 0xffffffff);

	GEN3_IRQ_RESET();
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
			  I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded; this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);

	return 0;
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		I915_WRITE(IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i965_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	I915_WRITE(HWSTAM, 0xffffffff);

	GEN3_IRQ_RESET();
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;
	u32 error_mask;

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded; this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);

	return 0;
}

static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/* Programming the CRT detection parameters tends
	   to generate a spurious hotplug event about three
	   seconds later.  So just do it once.
	*/
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		I915_WRITE(IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);

		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev_priv->engine[VCS]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not setup the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int i;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&rps->work, gen6_pm_rps_work);

	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	if (HAS_GUC_SCHED(dev_priv))
		dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD |
					   GEN6_PM_RP_DOWN_THRESHOLD |
					   GEN6_PM_RP_DOWN_TIMEOUT);

	rps->pm_intrmsk_mbz = 0;

	/*
	 * SNB,IVB,HSW can hard hang (and VLV,CHV may) on a looping
	 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_GEN(dev_priv) <= 7)
		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_GEN(dev_priv) >= 8)
		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;

	if (IS_GEN2(dev_priv)) {
		/* Gen2 doesn't have a hardware frame counter */
		dev->max_vblank_count = 0;
	} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	/* Most platforms treat the display irq block as an always-on
	 * power domain. vlv/chv can disable it at runtime and need
	 * special care to avoid writing any of the display block registers
	 * outside of the power domain. We defer setting up the display irqs
	 * in this case to the runtime pm.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	/* If we have MST support, we want to avoid doing short HPD IRQ storm
	 * detection, as short HPD storms will occur as a natural part of
	 * sideband messaging with MST.
	 * On older platforms however, IRQ storms can occur with both long and
	 * short pulses, as seen on some G4x systems.
	 */
	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);

	dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_reset;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_reset;
		dev->driver->enable_vblank = i965_enable_vblank;
		dev->driver->disable_vblank = i965_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_reset;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_reset;
		dev->driver->enable_vblank = i965_enable_vblank;
		dev->driver->disable_vblank = i965_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_GEN(dev_priv) >= 11) {
		dev->driver->irq_handler = gen11_irq_handler;
		dev->driver->irq_preinstall = gen11_irq_reset;
		dev->driver->irq_postinstall = gen11_irq_postinstall;
		dev->driver->irq_uninstall = gen11_irq_reset;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
	} else if (INTEL_GEN(dev_priv) >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_reset;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (IS_GEN9_LP(dev_priv))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
			 HAS_PCH_CNP(dev_priv))
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_reset;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else {
		if (IS_GEN2(dev_priv)) {
			dev->driver->irq_preinstall = i8xx_irq_reset;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_reset;
			dev->driver->enable_vblank = i8xx_enable_vblank;
			dev->driver->disable_vblank = i8xx_disable_vblank;
		} else if (IS_GEN3(dev_priv)) {
			dev->driver->irq_preinstall = i915_irq_reset;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_reset;
			dev->driver->irq_handler = i915_irq_handler;
			dev->driver->enable_vblank = i8xx_enable_vblank;
			dev->driver->disable_vblank = i8xx_disable_vblank;
		} else {
			dev->driver->irq_preinstall = i965_irq_reset;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_reset;
			dev->driver->irq_handler = i965_irq_handler;
			dev->driver->enable_vblank = i965_enable_vblank;
			dev->driver->disable_vblank = i965_disable_vblank;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	}
}

/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(&dev_priv->drm);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
	dev_priv->runtime_pm.irqs_enabled = false;
	synchronize_irq(dev_priv->drm.irq);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->runtime_pm.irqs_enabled = true;
	dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
	dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
}