/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN3_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

#define GEN2_IRQ_RESET(type) do { \
	I915_WRITE16(type##IMR, 0xffff); \
	POSTING_READ16(type##IMR); \
	I915_WRITE16(type##IER, 0); \
	I915_WRITE16(type##IIR, 0xffff); \
	POSTING_READ16(type##IIR); \
	I915_WRITE16(type##IIR, 0xffff); \
	POSTING_READ16(type##IIR); \
} while (0)

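/*
 * A minimal sketch (illustrative only, not a real caller) of what the
 * reset macros above expand to, spelled out for the GEN3 display
 * registers via GEN3_IRQ_RESET(DE), with the POSTING_READs elided:
 *
 *	I915_WRITE(DEIMR, 0xffffffff);	mask every interrupt source
 *	I915_WRITE(DEIER, 0);		stop forwarding events to the CPU
 *	I915_WRITE(DEIIR, 0xffffffff);	ack stale events, twice, since
 *	I915_WRITE(DEIIR, 0xffffffff);	IIR can queue up two of them
 */
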
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u16 val = I915_READ16(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE16(reg, 0xffff);
	POSTING_READ16(reg);
	I915_WRITE16(reg, 0xffff);
	POSTING_READ16(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \
	gen3_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

#define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \
	gen2_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE16(type##IER, (ier_val)); \
	I915_WRITE16(type##IMR, (imr_val)); \
	POSTING_READ16(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid read-modify-write cycles from
 * interfering, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

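/*
 * Example (hypothetical caller): unmask only the CRT hotplug detect bit
 * while leaving the other HPD enable bits untouched:
 *
 *	i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_INT_EN,
 *				      CRT_HOTPLUG_INT_EN);
 */
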
/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    uint32_t interrupt_mask,
			    uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

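/*
 * Worked example for ilk_update_display_irq() (hypothetical masks):
 * with interrupt_mask=0x3 and enabled_irq_mask=0x1, bit 0 ends up
 * unmasked, bit 1 ends up masked, and every bit outside 0x3 keeps its
 * previous DEIMR value.
 */
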
/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
	POSTING_READ_FW(GTIMR);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

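/*
 * On gen8+ the PM interrupts live in GT interrupt bank 2, so the
 * following helpers pick the right IIR/IMR/IER register for the
 * running platform instead of the dedicated gen6 PM registers.
 */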
static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	new_val = dev_priv->pm_imr;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_imr) {
		dev_priv->pm_imr = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_mask_pm_irq(dev_priv, mask);
}

static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	lockdep_assert_held(&dev_priv->irq_lock);

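	/* IIR is double buffered; write twice to clear both queued events */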
	I915_WRITE(reg, reset_mask);
	I915_WRITE(reg, reset_mask);
	POSTING_READ(reg);
}

static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier |= enable_mask;
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	gen6_unmask_pm_irq(dev_priv, enable_mask);
	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}

static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier &= ~disable_mask;
	__gen6_mask_pm_irq(dev_priv, disable_mask);
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	/* though a barrier is missing here, we don't really need one */
}

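/*
 * Sketch of the expected calling pattern for the enable/disable pair
 * above (hypothetical caller; both helpers assume dev_priv->irq_lock
 * is already held):
 *
 *	spin_lock_irq(&dev_priv->irq_lock);
 *	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
 *	...
 *	gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
 *	spin_unlock_irq(&dev_priv->irq_lock);
 */
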
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
	dev_priv->gt_pm.rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (READ_ONCE(rps->interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON_ONCE(rps->pm_iir);
	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	rps->interrupts_enabled = true;
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (!READ_ONCE(rps->interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	rps->interrupts_enabled = false;

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	/* Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&rps->work);
	gen6_reset_rps_interrupts(dev_priv);
}

void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	if (!dev_priv->guc.interrupts_enabled) {
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
				       dev_priv->pm_guc_events);
		dev_priv->guc.interrupts_enabled = true;
		gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
	}
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->guc.interrupts_enabled = false;

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	gen9_reset_guc_interrupts(dev_priv);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (INTEL_GEN(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		  status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		  pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}

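/*
 * Example (hypothetical input): on a pre-gen5 part a status_mask of
 * PIPE_VBLANK_INTERRUPT_STATUS yields an enable_mask that is simply the
 * same bit shifted into the enable half of PIPESTAT, i.e.
 * status_mask << 16.
 */
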
void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode;
	unsigned long irqflags;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ_FW(low_frame);
		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

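/*
 * Worked example for the cooked counter above (hypothetical timings):
 * with htotal=2200, hsync_start=2008 and vblank_start=1080, vbl_start
 * becomes 1080 * 2200 - (2200 - 2008) = 2375808 pixels; once the pixel
 * counter passes that point the returned value reads one frame ahead of
 * the raw hardware frame counter.
 */
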
static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/*
 * On certain encoders on certain platforms, the pipe scanline register
 * will not work to get the scanline, since the timings are driven from
 * the PORT, or there are issues with the scanline register updates.
 * This function instead uses the Framestamp and current timestamp
 * registers to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);

		scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
					clock), 1000 * htotal);
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}

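/*
 * Worked example for the conversion above (hypothetical numbers,
 * assuming the timestamp counter ticks at 1 MHz): clock=148500 and
 * htotal=2200 give one line every 1000 * 2200 / 148500 ~= 14.8 us, so a
 * delta of 1481 ticks since the last frame timestamp lands about 100
 * lines past vblank_start.
 */
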
/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return -1;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev_priv))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank.  So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem.  We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				     bool in_vblank_irq, int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
								pipe);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev_priv, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *rq = NULL;
	struct intel_wait *wait;

	atomic_inc(&engine->irq_count);
	set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	spin_lock(&engine->breadcrumbs.irq_lock);
	wait = engine->breadcrumbs.irq_wait;
	if (wait) {
		bool wakeup = engine->irq_seqno_barrier;

		/* We use a callback from the dma-fence to submit
		 * requests after waiting on our own requests. To
		 * ensure minimum delay in queuing the next request to
		 * hardware, signal the fence now rather than wait for
		 * the signaler to be woken up. We still wake up the
		 * waiter in order to handle the irq-seqno coherency
		 * issues (we may receive the interrupt before the
		 * seqno is written, see __i915_request_irq_complete())
		 * and to handle coalescing of multiple seqno updates
		 * and many waiters.
		 */
		if (i915_seqno_passed(intel_engine_get_seqno(engine),
				      wait->seqno)) {
			struct drm_i915_gem_request *waiter = wait->request;

			wakeup = true;
			if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				      &waiter->fence.flags) &&
			    intel_wait_check_request(wait, waiter))
				rq = i915_gem_request_get(waiter);
		}

		if (wakeup)
			wake_up_process(wait->tsk);
	} else {
		__intel_engine_disarm_breadcrumbs(engine);
	}
	spin_unlock(&engine->breadcrumbs.irq_lock);

	if (rq) {
		dma_fence_signal(&rq->fence);
		i915_gem_request_put(rq);
	}

	trace_intel_engine_notify(engine, wait);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->ktime = ktime_get_raw();
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	const struct intel_rps_ei *prev = &rps->ei;
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);

	if (prev->ktime) {
		u64 time, c0;
		u32 render, media;

		time = ktime_us_delta(now.ktime, prev->ktime);

		time *= dev_priv->czclk_freq;

		/* Workload can be split between render + media,
		 * e.g. SwapBuffers being blitted in X after being rendered in
		 * mesa. To account for this we need to combine both engines
		 * into our activity counter.
		 */
		render = now.render_c0 - prev->render_c0;
		media = now.media_c0 - prev->media_c0;
		c0 = max(render, media);
		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */

		if (c0 > time * rps->up_threshold)
			events = GEN6_PM_RP_UP_THRESHOLD;
		else if (c0 < time * rps->down_threshold)
			events = GEN6_PM_RP_DOWN_THRESHOLD;
	}

	rps->ei = now;
	return events;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, gt_pm.rps.work);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	bool client_boost = false;
	int new_delay, adj, min, max;
	u32 pm_iir = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	if (rps->interrupts_enabled) {
		pm_iir = fetch_and_zero(&rps->pm_iir);
		client_boost = atomic_read(&rps->num_waiters);
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		goto out;

	mutex_lock(&dev_priv->pcu_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = rps->last_adj;
	new_delay = rps->cur_freq;
	min = rps->min_freq_softlimit;
	max = rps->max_freq_softlimit;
	if (client_boost)
		max = rps->max_freq;
	if (client_boost && new_delay < rps->boost_freq) {
		new_delay = rps->boost_freq;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;

		if (new_delay >= rps->max_freq_softlimit)
			adj = 0;
	} else if (client_boost) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (rps->cur_freq > rps->efficient_freq)
			new_delay = rps->efficient_freq;
		else if (rps->cur_freq > rps->min_freq_softlimit)
			new_delay = rps->min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;

		if (new_delay <= rps->min_freq_softlimit)
			adj = 0;
	} else { /* unknown event */
		adj = 0;
	}

	rps->last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	if (intel_set_rps(dev_priv, new_delay)) {
		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
		rps->last_adj = 0;
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	spin_lock_irq(&dev_priv->irq_lock);
	if (rps->interrupts_enabled)
		gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since
 * statistically the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
					       u32 iir)
{
	if (!HAS_L3_DPF(dev_priv))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev_priv);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(dev_priv->engine[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev_priv->engine[VCS]);
}

static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(dev_priv->engine[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev_priv->engine[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev_priv->engine[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev_priv))
		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
}

static void
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	bool tasklet = false;

	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) {
		__set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
		tasklet = true;
	}

	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) {
		notify_ring(engine);
		tasklet |= i915_modparams.enable_guc_submission;
	}

	if (tasklet)
		tasklet_hi_schedule(&execlists->irq_tasklet);
}

static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
				   u32 master_ctl,
				   u32 gt_iir[4])
{
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
		if (gt_iir[0]) {
			I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
		if (gt_iir[1]) {
			I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
		if (gt_iir[3]) {
			I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
		if (gt_iir[2] & (dev_priv->pm_rps_events |
				 dev_priv->pm_guc_events)) {
			I915_WRITE_FW(GEN8_GT_IIR(2),
				      gt_iir[2] & (dev_priv->pm_rps_events |
						   dev_priv->pm_guc_events));
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	return ret;
}

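/*
 * Note: gen8_gt_irq_ack() above only latches and clears the IIR bits
 * with raw _FW accesses; the actual processing happens afterwards in
 * gen8_gt_irq_handler() below. This split lets the caller ack
 * everything while the master interrupt control is still disabled.
 */
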
static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
				u32 gt_iir[4])
{
	if (gt_iir[0]) {
		gen8_cs_irq_handler(dev_priv->engine[RCS],
				    gt_iir[0], GEN8_RCS_IRQ_SHIFT);
		gen8_cs_irq_handler(dev_priv->engine[BCS],
				    gt_iir[0], GEN8_BCS_IRQ_SHIFT);
	}

	if (gt_iir[1]) {
1463
		gen8_cs_irq_handler(dev_priv->engine[VCS],
1464
				    gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
1465
		gen8_cs_irq_handler(dev_priv->engine[VCS2],
1466 1467 1468 1469
				    gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
	}

	if (gt_iir[3])
1470
		gen8_cs_irq_handler(dev_priv->engine[VECS],
				    gt_iir[3], GEN8_VECS_IRQ_SHIFT);

	if (gt_iir[2] & dev_priv->pm_rps_events)
		gen6_rps_irq_handler(dev_priv, gt_iir[2]);

	if (gt_iir[2] & dev_priv->pm_guc_events)
		gen9_guc_irq_handler(dev_priv, gt_iir[2]);
}

static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
			     u32 hotplug_trigger, u32 dig_hotplug_reg,
			     const u32 hpd[HPD_NUM_PINS],
			     bool long_pulse_detect(enum port port, u32 val))
{
	enum port port;
	int i;

	for_each_hpd_pin(i) {
		if ((hpd[i] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(i);

		port = intel_hpd_pin_to_port(i);
		if (port == PORT_NONE)
			continue;

		if (long_pulse_detect(port, dig_hotplug_reg))
			*long_mask |= BIT(i);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
}

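/*
 * Sketch of the accumulating caller pattern described above
 * (hypothetical trigger values, assuming an SPT-like PCH with two
 * hotplug trigger registers):
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
 *			   dig_hotplug_reg, hpd_spt,
 *			   spt_port_hotplug_long_detect);
 *	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
 *			   dig_hotplug_reg2, hpd_spt,
 *			   spt_port_hotplug2_long_detect);
 */
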
static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct drm_driver *driver = dev_priv->drm.driver;
	uint32_t crcs[5];
	int head, tail;

	spin_lock(&pipe_crc->lock);
	if (pipe_crc->source) {
		if (!pipe_crc->entries) {
			spin_unlock(&pipe_crc->lock);
			DRM_DEBUG_KMS("spurious interrupt\n");
			return;
		}
		tail = pipe_crc->tail;
1626

T
			spin_unlock(&pipe_crc->lock);
			DRM_ERROR("CRC buffer overflowing\n");
			return;
		}
1632

T
1634

T
		entry->crc[0] = crc0;
		entry->crc[1] = crc1;
		entry->crc[2] = crc2;
		entry->crc[3] = crc3;
		entry->crc[4] = crc4;

		head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
		pipe_crc->head = head;

		spin_unlock(&pipe_crc->lock);

		wake_up_interruptible(&pipe_crc->wq);
	} else {
		/*
		 * For some not yet identified reason, the first CRC is
		 * bonkers. So let's just wait for the next vblank and read
		 * out the buggy result.
		 *
		 * On GEN8+ sometimes the second CRC is bonkers as well, so
		 * don't trust that one either.
		 */
		if (pipe_crc->skipped == 0 ||
		    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
			pipe_crc->skipped++;
			spin_unlock(&pipe_crc->lock);
			return;
		}
		spin_unlock(&pipe_crc->lock);
		crcs[0] = crc0;
		crcs[1] = crc1;
		crcs[2] = crc2;
		crcs[3] = crc3;
		crcs[4] = crc4;
		drm_crtc_add_crc_entry(&crtc->base, true,
				       drm_crtc_accurate_vblank_count(&crtc->base),
				       crcs);
	}
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif


static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
D
Daniel Vetter 已提交
1686
{
1687
	display_pipe_crc_irq_handler(dev_priv, pipe,
1688 1689
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
D
Daniel Vetter 已提交
1690 1691
}

1692 1693
static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
1694
{
1695
	display_pipe_crc_irq_handler(dev_priv, pipe,
1696 1697 1698 1699 1700
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1701
}
1702

1703 1704
static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
1705
{
1706 1707
	uint32_t res1, res2;

1708
	if (INTEL_GEN(dev_priv) >= 3)
1709 1710 1711 1712
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

1713
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1714 1715 1716
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;
1717

1718
	display_pipe_crc_irq_handler(dev_priv, pipe,
1719 1720 1721 1722
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
1723
}
1724

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (rps->interrupts_enabled) {
			rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
			schedule_work(&rps->work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_GEN(dev_priv) >= 8)
		return;

	if (HAS_VEBOX(dev_priv)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->engine[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
{
	if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) {
		/* Sample the log buffer flush related bits & clear them out now
		 * itself from the message identity register to minimize the
		 * probability of losing a flush interrupt, when there are back
		 * to back flush interrupts.
		 * There can be a new flush interrupt, for different log buffer
		 * type (like for ISR), whilst Host is handling one (for DPC).
		 * Since same bit is used in message register for ISR & DPC, it
		 * could happen that GuC sets the bit for 2nd interrupt but Host
		 * clears out the bit on handling the 1st interrupt.
		 */
		u32 msg, flush;

		msg = I915_READ(SOFT_SCRATCH(15));
		flush = msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED |
			       INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER);
		if (flush) {
			/* Clear the message bits that are handled */
			I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);

			/* Handle flush interrupt in bottom half */
			queue_work(dev_priv->guc.log.runtime.flush_wq,
				   &dev_priv->guc.log.runtime.flush_work);

			dev_priv->guc.log.flush_interrupt_count++;
		} else {
			/* Leaving the unhandled event bits set will not
			 * re-trigger the interrupt, so nothing to do here.
			 */
		}
	}
}

static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(PIPESTAT(pipe),
			   PIPESTAT_INT_STATUS_MASK |
			   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}

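/*
 * Read and clear the PIPESTAT status bits for each pipe into pipe_stats[].
 * This must happen before the (VLV_)IIR register is cleared, because IIR
 * reflects the current PIPESTAT level rather than latching it.
 */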
static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	int pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			status_mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(pipe);
		pipe_stats[pipe] = I915_READ(reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe])
			I915_WRITE(reg, enable_mask | pipe_stats[pipe]);
	}

	spin_unlock(&dev_priv->irq_lock);
}

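/*
 * The per-platform handlers below consume the pipe_stats[] snapshot taken
 * by i9xx_pipestat_irq_ack() and fan it out to vblank, CRC, underrun and
 * (where applicable) backlight/GMBUS handling.
 */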
static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}
}

static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);
}

static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

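/* Read PORT_HOTPLUG_STAT and write it back to clear the latched bits. */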
static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (hotplug_status)
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);

	return hotplug_status;
}

static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev_priv);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}
	}
}

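/*
 * Note the do { ... } while (0) in the handlers below: the body runs
 * exactly once; the construct exists only so that "break" can be used to
 * bail out early once all IIR registers read back as zero.
 */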
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);
		iir = I915_READ(VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		I915_WRITE(VLV_MASTER_IER, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		I915_WRITE(VLV_IER, ier);
		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
		POSTING_READ(VLV_MASTER_IER);

		if (gt_iir)
			snb_gt_irq_handler(dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 master_ctl, iir;
		u32 gt_iir[4] = {};
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		I915_WRITE(GEN8_MASTER_IRQ, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		I915_WRITE(VLV_IER, ier);
		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);

		gen8_gt_irq_handler(dev_priv, gt_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/*
	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
	 * unless we touch the hotplug register, even if hotplug_trigger is
	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
	 * errors.
	 */
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	if (!hotplug_trigger) {
		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
			PORTD_HOTPLUG_STATUS_MASK |
			PORTC_HOTPLUG_STATUS_MASK |
			PORTB_HOTPLUG_STATUS_MASK;
		dig_hotplug_reg &= ~mask;
	}

	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
	if (!hotplug_trigger)
		return;

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   pch_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
}

static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev_priv))
				ivb_pipe_crc_irq_handler(dev_priv, pipe);
			else
				hsw_pipe_crc_irq_handler(dev_priv, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
	u32 serr_int = I915_READ(SERR_INT);
	enum pipe pipe;

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	for_each_pipe(dev_priv, pipe)
		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev_priv);
}

static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
	u32 pin_mask = 0, long_mask = 0;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug_long_detect);
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug2_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);
}

static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   ilk_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev_priv);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev_priv))
			cpt_irq_handler(dev_priv, pch_iir);
		else
			ibx_irq_handler(dev_priv, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev_priv);
}

static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev_priv);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev_priv);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
			drm_handle_vblank(&dev_priv->drm, pipe);
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev_priv, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev_priv)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
		if (INTEL_GEN(dev_priv) >= 6)
			snb_gt_irq_handler(dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev_priv, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
		if (INTEL_GEN(dev_priv) >= 7)
			ivb_display_irq_handler(dev_priv, de_iir);
		else
			ilk_display_irq_handler(dev_priv, de_iir);
	}

	if (INTEL_GEN(dev_priv) >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, pm_iir);
		}
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev_priv)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   bxt_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

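/*
 * Display-engine interrupt dispatch for gen8+: master_ctl (already read
 * and masked by the caller) tells us which DE IIR banks (misc, port,
 * per-pipe, PCH) have something pending, and each bank is read, cleared
 * and then processed in turn.
 */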
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;
	u32 iir;
	enum pipe pipe;

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = I915_READ(GEN8_DE_MISC_IIR);
		if (iir) {
			I915_WRITE(GEN8_DE_MISC_IIR, iir);
			ret = IRQ_HANDLED;
			if (iir & GEN8_DE_MISC_GSE)
				intel_opregion_asle_intr(dev_priv);
			else
				DRM_ERROR("Unexpected DE Misc interrupt\n");
		}
		else
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = I915_READ(GEN8_DE_PORT_IIR);
		if (iir) {
			u32 tmp_mask;
			bool found = false;

			I915_WRITE(GEN8_DE_PORT_IIR, iir);
			ret = IRQ_HANDLED;

			tmp_mask = GEN8_AUX_CHANNEL_A;
			if (INTEL_GEN(dev_priv) >= 9)
				tmp_mask |= GEN9_AUX_CHANNEL_B |
					    GEN9_AUX_CHANNEL_C |
					    GEN9_AUX_CHANNEL_D;

			if (iir & tmp_mask) {
				dp_aux_irq_handler(dev_priv);
				found = true;
			}

			if (IS_GEN9_LP(dev_priv)) {
				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
				if (tmp_mask) {
					bxt_hpd_irq_handler(dev_priv, tmp_mask,
							    hpd_bxt);
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
				if (tmp_mask) {
					ilk_hpd_irq_handler(dev_priv,
							    tmp_mask, hpd_bdw);
					found = true;
				}
			}

			if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
				gmbus_irq_handler(dev_priv);
				found = true;
			}

			if (!found)
				DRM_ERROR("Unexpected DE Port interrupt\n");
		}
		else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
	}

	for_each_pipe(dev_priv, pipe) {
		u32 fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
			continue;
		}

		ret = IRQ_HANDLED;
		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev_priv, pipe);

		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		fault_errors = iir;
		if (INTEL_GEN(dev_priv) >= 9)
			fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		else
			fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

		if (fault_errors)
			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
				  pipe_name(pipe),
				  fault_errors);
	}

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		iir = I915_READ(SDEIIR);
		if (iir) {
			I915_WRITE(SDEIIR, iir);
			ret = IRQ_HANDLED;

			if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
			    HAS_PCH_CNP(dev_priv))
				spt_irq_handler(dev_priv, iir);
			else
				cpt_irq_handler(dev_priv, iir);
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
		}
	}

	return ret;
}

static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 master_ctl;
	u32 gt_iir[4] = {};
	irqreturn_t ret;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
	if (!master_ctl)
		return IRQ_NONE;

	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	/* Find, clear, then process each source of interrupt */
	ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
	gen8_gt_irq_handler(dev_priv, gt_iir);
	ret |= gen8_de_irq_handler(dev_priv, master_ctl);

	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ_FW(GEN8_MASTER_IRQ);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

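/*
 * Reset watchdog: if the i915_wedge_on_timeout() block below does not run
 * to completion within the given timeout, the delayed work fires and
 * declares the GPU wedged, cancelling all in-flight rendering. The for
 * construct in the macro executes the block exactly once and cancels the
 * watchdog in __fini_wedge() on the way out.
 */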
struct wedge_me {
	struct delayed_work work;
	struct drm_i915_private *i915;
	const char *name;
};

static void wedge_me(struct work_struct *work)
{
	struct wedge_me *w = container_of(work, typeof(*w), work.work);

	dev_err(w->i915->drm.dev,
		"%s timed out, cancelling all in-flight rendering.\n",
		w->name);
	i915_gem_set_wedged(w->i915);
}

static void __init_wedge(struct wedge_me *w,
			 struct drm_i915_private *i915,
			 long timeout,
			 const char *name)
{
	w->i915 = i915;
	w->name = name;

	INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
	schedule_delayed_work(&w->work, timeout);
}

static void __fini_wedge(struct wedge_me *w)
{
	cancel_delayed_work_sync(&w->work);
	destroy_delayed_work_on_stack(&w->work);
	w->i915 = NULL;
}

#define i915_wedge_on_timeout(W, DEV, TIMEOUT)				\
	for (__init_wedge((W), (DEV), (TIMEOUT), __func__);		\
	     (W)->i915;							\
	     __fini_wedge((W)))

/**
 * i915_reset_device - do process context error handling work
 * @dev_priv: i915 device private
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_reset_device(struct drm_i915_private *dev_priv)
{
	struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	struct wedge_me w;

	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);

	DRM_DEBUG_DRIVER("resetting chip\n");
	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);

	/* Use a watchdog to ensure that our reset completes */
	i915_wedge_on_timeout(&w, dev_priv, 5*HZ) {
		intel_prepare_reset(dev_priv);

		/* Signal that locked waiters should reset the GPU */
		set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags);
		wake_up_all(&dev_priv->gpu_error.wait_queue);

		/* Wait for anyone holding the lock to wakeup, without
		 * blocking indefinitely on struct_mutex.
		 */
		do {
			if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
				i915_reset(dev_priv, 0);
				mutex_unlock(&dev_priv->drm.struct_mutex);
			}
		} while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
					     I915_RESET_HANDOFF,
					     TASK_UNINTERRUPTIBLE,
					     1));

		intel_finish_reset(dev_priv);
	}

	if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		kobject_uevent_env(kobj,
				   KOBJ_CHANGE, reset_done_event);
}

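/*
 * Clear any latched error state (page table errors, instruction pointers,
 * EIR) so that stale errors are not reported again after a reset. Bits
 * that remain stuck in EIR are masked via EMR instead.
 */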
static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
{
	u32 eir;

	if (!IS_GEN2(dev_priv))
		I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));

	if (INTEL_GEN(dev_priv) < 4)
		I915_WRITE(IPEIR, I915_READ(IPEIR));
	else
		I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));

	I915_WRITE(EIR, I915_READ(EIR));
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle a gpu error
 * @dev_priv: i915 device private
 * @engine_mask: mask representing engines that are hung
 * @fmt: Error message format string
 *
 * Do some basic checking of register state at error time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_i915_private *dev_priv,
		       u32 engine_mask,
		       const char *fmt, ...)
{
	struct intel_engine_cs *engine;
	unsigned int tmp;
	va_list args;
	char error_msg[80];

	va_start(args, fmt);
	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
	va_end(args);

	/*
	 * In most cases it's guaranteed that we get here with an RPM
	 * reference held, for example because there is a pending GPU
	 * request that won't finish until the reset is done. This
	 * isn't the case at least when we get here by doing a
	 * simulated reset via debugfs, so get an RPM reference.
	 */
	intel_runtime_pm_get(dev_priv);

	i915_capture_error_state(dev_priv, engine_mask, error_msg);
	i915_clear_error_registers(dev_priv);

	/*
	 * Try engine reset when available. We fall back to full reset if
	 * single reset fails.
	 */
	if (intel_has_reset_engine(dev_priv)) {
		for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
			BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
			if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
					     &dev_priv->gpu_error.flags))
				continue;

			if (i915_reset_engine(engine, 0) == 0)
				engine_mask &= ~intel_engine_flag(engine);

			clear_bit(I915_RESET_ENGINE + engine->id,
				  &dev_priv->gpu_error.flags);
			wake_up_bit(&dev_priv->gpu_error.flags,
				    I915_RESET_ENGINE + engine->id);
		}
	}

	if (!engine_mask)
		goto out;

	/* Full reset needs the mutex, stop any other user trying to do so. */
	if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) {
		wait_event(dev_priv->gpu_error.reset_queue,
			   !test_bit(I915_RESET_BACKOFF,
				     &dev_priv->gpu_error.flags));
		goto out;
	}

	/* Prevent any other reset-engine attempt. */
	for_each_engine(engine, dev_priv, tmp) {
		while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
					&dev_priv->gpu_error.flags))
			wait_on_bit(&dev_priv->gpu_error.flags,
				    I915_RESET_ENGINE + engine->id,
				    TASK_UNINTERRUPTIBLE);
	}

	i915_reset_device(dev_priv);

	for_each_engine(engine, dev_priv, tmp) {
		clear_bit(I915_RESET_ENGINE + engine->id,
			  &dev_priv->gpu_error.flags);
	}

	clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags);
	wake_up_all(&dev_priv->gpu_error.reset_queue);

out:
	intel_runtime_pm_put(dev_priv);
}
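
/*
 * Illustrative use only (not a caller in this file): a hangcheck-style
 * path would typically report a single hung engine along the lines of
 *
 *	i915_handle_error(dev_priv, intel_engine_flag(engine),
 *			  "%s hung", engine->name);
 *
 * while engine_mask == 0 only captures and clears the error state without
 * performing any reset.
 */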

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;
	uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;
	uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_NOP(dev_priv))
		return;

	GEN3_IRQ_RESET(SDE);

	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		I915_WRITE(SERR_INT, 0xffffffff);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * This function needs to be called before interrupts are enabled.
 */
static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (HAS_PCH_NOP(dev_priv))
		return;

	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}
3045
static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
3046
{
V
Ville Syrjälä 已提交
3047
	GEN3_IRQ_RESET(GT);
3048
	if (INTEL_GEN(dev_priv) >= 6)
V
Ville Syrjälä 已提交
3049
		GEN3_IRQ_RESET(GEN6_PM);
3050 3051
}

3052 3053
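/*
 * vlv_display_irq_reset() and vlv_display_irq_postinstall() are called
 * under irq_lock (or during single-threaded setup) and use
 * dev_priv->irq_mask == ~0 as the "display irqs disabled" sentinel, which
 * the WARN_ON in the postinstall path relies on.
 */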
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	if (IS_CHERRYVIEW(dev_priv))
		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);

	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(VLV_);
	dev_priv->irq_mask = ~0;
}

static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 enable_mask;
	enum pipe pipe;

	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_LPE_PIPE_A_INTERRUPT |
		I915_LPE_PIPE_B_INTERRUPT;

	if (IS_CHERRYVIEW(dev_priv))
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
			I915_LPE_PIPE_C_INTERRUPT;

	WARN_ON(dev_priv->irq_mask != ~0);

	dev_priv->irq_mask = ~enable_mask;

	GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
}

/* drm_dma.h hooks
*/
static void ironlake_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (IS_GEN5(dev_priv))
		I915_WRITE(HWSTAM, 0xffffffff);

	GEN3_IRQ_RESET(DE);
	if (IS_GEN7(dev_priv))
		I915_WRITE(GEN7_ERR_INT, 0xffffffff);

	gen5_gt_irq_reset(dev_priv);

	ibx_irq_reset(dev_priv);
}

static void valleyview_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(VLV_MASTER_IER, 0);
	POSTING_READ(VLV_MASTER_IER);

	gen5_gt_irq_reset(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	GEN8_IRQ_RESET_NDX(GT, 0);
	GEN8_IRQ_RESET_NDX(GT, 1);
	GEN8_IRQ_RESET_NDX(GT, 2);
	GEN8_IRQ_RESET_NDX(GT, 3);
}

static void gen8_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);

	GEN3_IRQ_RESET(GEN8_DE_PORT_);
	GEN3_IRQ_RESET(GEN8_DE_MISC_);
	GEN3_IRQ_RESET(GEN8_PCU_);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_reset(dev_priv);
}

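/*
 * The two helpers below bracket display power well transitions: the
 * per-pipe interrupt registers are re-initialized after a power well
 * comes up, and reset (with in-flight handling flushed via
 * synchronize_irq()) before it goes down.
 */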
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
				  dev_priv->de_irq_mask[pipe],
				  ~dev_priv->de_irq_mask[pipe] | extra_ier);

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);

	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);
}

static void cherryview_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN3_IRQ_RESET(GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}
3220
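/*
 * Translate the software per-pin hotplug enable state into a hardware
 * trigger mask, using the platform's pin-to-bit table (e.g. hpd_ibx,
 * hpd_cpt, hpd_spt).
 */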
static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
3221 3222 3223 3224 3225
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;

3226
	for_each_intel_encoder(&dev_priv->drm, encoder)
3227 3228 3229 3230 3231 3232
		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}

3233
static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
3234
{
3235
	u32 hotplug;
3236 3237 3238

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
3239 3240
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on LPT+.
3241
	 */
3242
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3243 3244 3245
	hotplug &= ~(PORTB_PULSE_DURATION_MASK |
		     PORTC_PULSE_DURATION_MASK |
		     PORTD_PULSE_DURATION_MASK);
3246
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3247 3248
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3249 3250 3251 3252
	/*
	 * When CPU and PCH are on the same package, port A
	 * HPD must be enabled in both north and south.
	 */
3253
	if (HAS_PCH_LPT_LP(dev_priv))
3254
		hotplug |= PORTA_HOTPLUG_ENABLE;
3255
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3256
}
X
Xiong Zhang 已提交
3257

static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	if (HAS_PCH_IBX(dev_priv)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	ibx_hpd_detection_setup(dev_priv);
}

static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 val, hotplug;

	/* Display WA #1179 WaHardHangonHotPlug: cnp */
	if (HAS_PCH_CNP(dev_priv)) {
		val = I915_READ(SOUTH_CHICKEN1);
		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
		val |= CHASSIS_CLK_REQ_DURATION(0xf);
		I915_WRITE(SOUTH_CHICKEN1, val);
	}

	/* Enable digital hotplug on the PCH */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE |
		   PORTD_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);

	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
	hotplug |= PORTE_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}

static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	spt_hpd_detection_setup(dev_priv);
}

static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 * The pulse duration bits are reserved on HSW+.
	 */
	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
		   DIGITAL_PORTA_PULSE_DURATION_2ms;
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
}

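/*
 * CPU-side digital port A hotplug setup: pick the register layout by
 * generation (GEN8_PORT_DP_A_HOTPLUG on BDW+, the IVB and ILK DE bits
 * otherwise), then chain into the PCH setup via ibx_hpd_irq_setup().
 */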
static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	if (INTEL_GEN(dev_priv) >= 8) {
		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);

		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else if (INTEL_GEN(dev_priv) >= 7) {
		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else {
		hotplug_irqs = DE_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	}

	ilk_hpd_detection_setup(dev_priv);

	ibx_hpd_irq_setup(dev_priv);
}

static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
				      u32 enabled_irqs)
{
	u32 hotplug;

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE;

	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
		      hotplug, enabled_irqs);
	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;

	/*
	 * For BXT invert bit has to be set based on AOB design
	 * for HPD detection logic, update it based on VBT fields.
	 */
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
		hotplug |= BXT_DDIA_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
		hotplug |= BXT_DDIB_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
		hotplug |= BXT_DDIC_HPD_INVERT;

	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	__bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
}

static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;

	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);

	__bxt_hpd_detection_setup(dev_priv, enabled_irqs);
}

static void ibx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 mask;

	if (HAS_PCH_NOP(dev_priv))
		return;

	if (HAS_PCH_IBX(dev_priv))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
	else
		mask = SDE_GMBUS_CPT;

	gen3_assert_iir_is_zero(dev_priv, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);

	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
	    HAS_PCH_LPT(dev_priv))
		ibx_hpd_detection_setup(dev_priv);
	else
		spt_hpd_detection_setup(dev_priv);
}

static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev_priv)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
		gt_irqs |= GT_PARITY_ERROR(dev_priv);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev_priv)) {
		gt_irqs |= ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);

	if (INTEL_GEN(dev_priv) >= 6) {
		/*
		 * RPS interrupts will get enabled/disabled on demand when RPS
		 * itself is enabled/disabled.
		 */
		if (HAS_VEBOX(dev_priv)) {
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
			dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
		}

		dev_priv->pm_imr = 0xffffffff;
		GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
	}
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 display_mask, extra_mask;

	if (INTEL_GEN(dev_priv) >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_DP_A_HOTPLUG);
	}

	dev_priv->irq_mask = ~display_mask;

	ibx_irq_pre_postinstall(dev);

	GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);

	gen5_gt_irq_postinstall(dev);

	ilk_hpd_detection_setup(dev_priv);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev_priv)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irq(&dev_priv->irq_lock);
		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}

	return 0;
}

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv)) {
		vlv_display_irq_reset(dev_priv);
		vlv_display_irq_postinstall(dev_priv);
	}
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		vlv_display_irq_reset(dev_priv);
}
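
/*
 * A minimal usage sketch (not compiled): both helpers above assert via
 * lockdep that irq_lock is held, so a caller -- e.g. the vlv display
 * power-well code -- is expected to wrap them like this.
 * "example_display_power_on" is a hypothetical name for illustration.
 */
#if 0
static void example_display_power_on(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}
#endif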

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen5_gt_irq_postinstall(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(VLV_MASTER_IER);

	return 0;
}

static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
		};

	if (HAS_L3_DPF(dev_priv))
		gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	dev_priv->pm_ier = 0x0;
	dev_priv->pm_imr = ~dev_priv->pm_ier;
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled. The same will be the case for GuC interrupts.
	 */
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
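
/*
 * A non-compiled sketch of the pattern used above: each engine gets the
 * same (user | context switch) pair of interrupt bits, shifted into its
 * slot of the two-engines-per-register GT banks. "gen8_gt_engine_irqs"
 * is a hypothetical helper, not a driver function.
 */
#if 0
static u32 gen8_gt_engine_irqs(u32 shift)
{
	return (GT_RENDER_USER_INTERRUPT |
		GT_CONTEXT_SWITCH_INTERRUPT) << shift;
}

/* e.g. gt_interrupts[0] == gen8_gt_engine_irqs(GEN8_RCS_IRQ_SHIFT) |
 *			    gen8_gt_engine_irqs(GEN8_BCS_IRQ_SHIFT) */
#endif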

static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	uint32_t de_pipe_enables;
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_MISC_GSE;
	enum pipe pipe;

	if (INTEL_GEN(dev_priv) >= 9) {
		de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
				  GEN9_AUX_CHANNEL_D;
		if (IS_GEN9_LP(dev_priv))
			de_port_masked |= BXT_DE_PORT_GMBUS;
	} else {
		de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	}

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

	de_port_enables = de_port_masked;
	if (IS_GEN9_LP(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;

	for_each_pipe(dev_priv, pipe) {
		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;

		if (intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);
	}

	GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);

	if (IS_GEN9_LP(dev_priv))
		bxt_hpd_detection_setup(dev_priv);
	else if (IS_BROADWELL(dev_priv))
		ilk_hpd_detection_setup(dev_priv);
}

static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen8_gt_irq_postinstall(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static void i8xx_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	i9xx_pipestat_irq_reset(dev_priv);

	I915_WRITE16(HWSTAM, 0xffff);

	GEN2_IRQ_RESET();
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u16 enable_mask;

	I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE |
			    I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);

	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_USER_INTERRUPT;

	GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u16 iir;

		iir = I915_READ16(IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		I915_WRITE16(IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);

		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}
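
/*
 * A minimal sketch (not compiled) of the IIR protocol shared by the
 * i8xx/i915/i965 handlers in this file: latch the secondary status
 * (pipestat, hotplug) while IIR is still set, ack IIR, then dispatch
 * from the latched copies so events that land mid-ack are not lost.
 * All names prefixed "example_" are generic placeholders, not driver
 * functions.
 */
#if 0
static irqreturn_t example_i9xx_style_handler(struct drm_i915_private *i915)
{
	irqreturn_t ret = IRQ_NONE;
	u16 iir = I915_READ16(IIR);

	if (iir == 0)
		return ret;

	ret = IRQ_HANDLED;
	example_ack_secondary_status(i915, iir);	/* pipestat, hotplug */
	I915_WRITE16(IIR, iir);				/* ack the primary */
	example_dispatch_from_latched_copies(i915, iir);

	return ret;
}
#endif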

static void i915_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	i9xx_pipestat_irq_reset(dev_priv);

	I915_WRITE(HWSTAM, 0xffffffff);

	GEN3_IRQ_RESET();
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
			  I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);

	return 0;
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		I915_WRITE(IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);

		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i965_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	I915_WRITE(HWSTAM, 0xffffffff);

	GEN3_IRQ_RESET();
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;
	u32 error_mask;

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);

	return 0;
}

static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/*
	 * Programming the CRT detection parameters tends to generate a
	 * spurious hotplug event about three seconds later, so just do
	 * it once.
	 */
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}
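
/*
 * A minimal usage sketch (not compiled): i915_hpd_irq_setup() asserts
 * that irq_lock is held, matching how the ->hpd_irq_setup() hook is
 * expected to be invoked from the hotplug code. "example_hpd_setup" is
 * a hypothetical caller name for illustration only.
 */
#if 0
static void example_hpd_setup(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}
#endif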

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		I915_WRITE(IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);

		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev_priv->engine[VCS]);

		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int i;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&rps->work, gen6_pm_rps_work);

	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	if (HAS_GUC_SCHED(dev_priv))
		dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	rps->pm_intrmsk_mbz = 0;

	/*
	 * SNB, IVB and HSW can, and VLV and CHV may, hard hang on a looping
	 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_GEN(dev_priv) <= 7)
		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_GEN(dev_priv) >= 8)
		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;

	if (IS_GEN2(dev_priv)) {
		/* Gen2 doesn't have a hardware frame counter */
		dev->max_vblank_count = 0;
	} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	/* Most platforms treat the display irq block as an always-on
	 * power domain. vlv/chv can disable it at runtime and need
	 * special care to avoid writing any of the display block registers
	 * outside of the power domain. We defer setting up the display irqs
	 * in this case to the runtime pm.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;

	dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_reset;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_reset;
		dev->driver->enable_vblank = i965_enable_vblank;
		dev->driver->disable_vblank = i965_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_reset;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_reset;
		dev->driver->enable_vblank = i965_enable_vblank;
		dev->driver->disable_vblank = i965_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_GEN(dev_priv) >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_reset;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (IS_GEN9_LP(dev_priv))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
			 HAS_PCH_CNP(dev_priv))
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_reset;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else {
		if (IS_GEN2(dev_priv)) {
			dev->driver->irq_preinstall = i8xx_irq_reset;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_reset;
			dev->driver->enable_vblank = i8xx_enable_vblank;
			dev->driver->disable_vblank = i8xx_disable_vblank;
		} else if (IS_GEN3(dev_priv)) {
			dev->driver->irq_preinstall = i915_irq_reset;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_reset;
			dev->driver->irq_handler = i915_irq_handler;
			dev->driver->enable_vblank = i8xx_enable_vblank;
			dev->driver->disable_vblank = i8xx_disable_vblank;
		} else {
			dev->driver->irq_preinstall = i965_irq_reset;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_reset;
			dev->driver->irq_handler = i965_irq_handler;
			dev->driver->enable_vblank = i965_enable_vblank;
			dev->driver->disable_vblank = i965_disable_vblank;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	}
}

/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
}
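
/*
 * A minimal load-time sketch (not compiled) of the two-stage bring-up
 * described above: vtables and work items first, then the hardware
 * interrupt, with hotplug (intel_hpd_init()) as a separate later step.
 * "example_driver_load_irqs" is a hypothetical name and error handling
 * is reduced to the bare minimum.
 */
#if 0
static int example_driver_load_irqs(struct drm_i915_private *dev_priv)
{
	int ret;

	intel_irq_init(dev_priv);		/* no hardware access yet */

	ret = intel_irq_install(dev_priv);	/* request_irq + postinstall */
	if (ret)
		return ret;

	intel_hpd_init(dev_priv);		/* hotplug enabled last */
	return 0;
}
#endif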

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(&dev_priv->drm);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
	dev_priv->runtime_pm.irqs_enabled = false;
	synchronize_irq(dev_priv->drm.irq);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->runtime_pm.irqs_enabled = true;
	dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
	dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
}
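
/*
 * A minimal runtime-pm sketch (not compiled): the two helpers above are
 * called as a pair from the suspend and resume paths. The function
 * names here are hypothetical placeholders for the real runtime hooks.
 */
#if 0
static int example_runtime_suspend(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_disable_interrupts(dev_priv);
	/* ... put the device into a low power state ... */
	return 0;
}

static int example_runtime_resume(struct drm_i915_private *dev_priv)
{
	/* ... bring the device back out of its low power state ... */
	intel_runtime_pm_enable_interrupts(dev_priv);
	return 0;
}
#endif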