i915_irq.c 129.1 KB
Newer Older
D
Dave Airlie 已提交
1
/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
L
Linus Torvalds 已提交
2
 */
D
Dave Airlie 已提交
3
/*
L
Linus Torvalds 已提交
4 5
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
D
Dave Airlie 已提交
27
 */
L
Linus Torvalds 已提交
28

29 30
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

31
#include <linux/sysrq.h>
32
#include <linux/slab.h>
33
#include <linux/circ_buf.h>
34 35
#include <drm/drmP.h>
#include <drm/i915_drm.h>
L
Linus Torvalds 已提交
36
#include "i915_drv.h"
C
Chris Wilson 已提交
37
#include "i915_trace.h"
J
Jesse Barnes 已提交
38
#include "intel_drv.h"
L
Linus Torvalds 已提交
39

40 41 42 43 44 45 46 47
/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

48 49 50 51
/*
 * Hotplug-detect (HPD) pin -> hardware interrupt bit lookup tables,
 * indexed by enum hpd_pin. One table per platform / PCH generation;
 * pins absent from a table are implicitly zero (no HPD support there).
 */

/* ILK: only DP A hotplug lives in the north display engine. */
static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

/* IVB: same as ILK but with the IVB-specific bit position. */
static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

/* BDW: DP A hotplug bit in the gen8 DE port interrupt register. */
static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

/* IBX south display engine (PCH) hotplug bits. */
static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

/* CPT/PPT PCH hotplug bits. */
static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

/* SPT PCH: adds ports A and E; B-D reuse the CPT bit positions. */
static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

/* Pre-PCH platforms: hotplug *enable* bits (PORT_HOTPLUG_EN). */
static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

/* G4X hotplug *status* bits (PORT_HOTPLUG_STAT). */
static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* i915-class hotplug *status* bits (SDVO bits differ from G4X). */
static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

118
/* IIR can theoretically queue up two events. Be paranoid. */
/*
 * Fully quiesce one gen8+ interrupt bank: mask everything (IMR),
 * disable everything (IER), then clear IIR twice with posting reads
 * because the hardware can latch a second event behind the first.
 */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

/* Same reset sequence for the single-bank gen5-style registers. */
#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

139 140 141 142 143 144
/*
 * Page-flip completion hook for command-streamer driven flips.
 * NOTE(review): as visible here this only emits a debug message — it looks
 * like a stub/stripped-down version; confirm against the full driver whether
 * actual flip completion work is expected to happen here.
 */
static void
intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, unsigned pipe)
{
	DRM_DEBUG_KMS("Finished page flip\n");
}

145 146 147
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
/*
 * Sanity check that an IIR register is zero before enabling interrupts;
 * if not, warn and clear it (twice — IIR can queue up two events).
 */
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	/* Write-1-to-clear all pending bits, with posting reads to flush. */
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}
163

P
Paulo Zanoni 已提交
164
/*
 * Initialize one gen8+ interrupt bank: verify IIR is clean, then program
 * the enable (IER) and mask (IMR) registers, flushing with a posting read.
 */
#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

/* Same init sequence for the single-bank gen5-style registers. */
#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

178 179
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217
/* For display hotplug interrupt */
/*
 * Read-modify-write PORT_HOTPLUG_EN: within @mask, set the bits in @bits
 * and clear the rest. Caller must hold dev_priv->irq_lock.
 */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	assert_spin_locked(&dev_priv->irq_lock);
	/* Bits to set must lie within the bits being updated. */
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid that read-modify-write cycles
 * interfer, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

218 219 220 221 222 223
/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
224 225 226
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    uint32_t interrupt_mask,
			    uint32_t enabled_irq_mask)
227
{
228 229
	uint32_t new_val;

230 231
	assert_spin_locked(&dev_priv->irq_lock);

232 233
	WARN_ON(enabled_irq_mask & ~interrupt_mask);

234
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
235 236
		return;

237 238 239 240 241 242
	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
243
		I915_WRITE(DEIMR, dev_priv->irq_mask);
244
		POSTING_READ(DEIMR);
245 246 247
	}
}

P
Paulo Zanoni 已提交
248 249 250 251 252 253 254 255 256 257 258 259
/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Within @interrupt_mask, unmask the bits in @enabled_irq_mask and mask
 * the rest. Caller must hold dev_priv->irq_lock. Unlike the DEIMR path,
 * this writes GTIMR unconditionally.
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	/* Bits being enabled must be a subset of the bits being updated. */
	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

271
/* Unmask @mask in GTIMR. Caller must hold dev_priv->irq_lock. */
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

/* Mask @mask in GTIMR. Caller must hold dev_priv->irq_lock. */
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

281
static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
282 283 284 285
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

286
static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
287 288 289 290
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

291
static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
292 293 294 295
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

P
Paulo Zanoni 已提交
296
/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Within @interrupt_mask, unmask the bits in @enabled_irq_mask and mask
 * the rest, writing the hardware only if the cached mask changed.
 * Caller must hold dev_priv->irq_lock.
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	/* Bits being enabled must be a subset of the bits being updated. */
	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

323
/* Unmask @mask in the PM IMR. Caller must hold dev_priv->irq_lock. */
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

/*
 * Mask @mask in the PM IMR without checking intel_irqs_enabled() —
 * used on teardown paths where interrupts are already being disabled.
 */
static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

/* Mask @mask in the PM IMR. Caller must hold dev_priv->irq_lock. */
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}

345
/*
 * Clear all pending RPS interrupt status and the cached pm_iir,
 * leaving the RPS interrupt machinery in a known-idle state.
 */
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	/*
	 * Written twice on purpose: IIR can queue a second event behind
	 * the first (see the "Be paranoid" note on the reset macros).
	 */
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

357
/*
 * Enable the RPS (render P-state / turbo) interrupts: sanity-check that
 * nothing is pending, then turn on IER and unmask IMR for pm_rps_events.
 */
void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);

	/* No events should be cached or pending in hardware at this point. */
	WARN_ON(dev_priv->rps.pm_iir);
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
				dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

371 372 373
/*
 * Filter an intended GEN6_PMINTRMSK value so it is safe for the running
 * hardware, returning the sanitized mask.
 */
u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	/*
	 * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer
	 * if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;

	/* On gen8+ keep PM interrupts routed to the GT (non-display) unit. */
	if (INTEL_INFO(dev_priv)->gen >= 8)
		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;

	return mask;
}

388
/*
 * Disable RPS interrupts and make sure no RPS work is still in flight:
 * flag them off, cancel the worker, mask/disable the hardware events,
 * then wait out any handler already running on another CPU.
 */
void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	/* Flip the flag first so a concurrent handler won't requeue work. */
	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Must not hold irq_lock here: the work item takes it itself. */
	cancel_work_sync(&dev_priv->rps.work);

	spin_lock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
				~dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);

	/* Wait for any handler still executing to finish. */
	synchronize_irq(dev_priv->dev->irq);
}

409
/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Read-modify-write of GEN8_DE_PORT_IMR (no cached copy is kept for this
 * register, unlike DEIMR/GTIMR). Caller must hold dev_priv->irq_lock.
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	assert_spin_locked(&dev_priv->irq_lock);

	/* Bits being enabled must be a subset of the bits being updated. */
	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	/* Only write the register when the mask actually changed. */
	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472
/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Updates the cached per-pipe mask and writes GEN8_DE_PIPE_IMR(pipe)
 * only when it changed. Caller must hold dev_priv->irq_lock.
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	/* Bits being enabled must be a subset of the bits being updated. */
	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

473 474 475 476 477 478
/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Read-modify-write of the south display engine interrupt mask.
 * Caller must hold dev_priv->irq_lock.
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	/*
	 * NOTE: SDEIMR is read before the irqs-enabled check below; the
	 * early return then discards the value without writing it back.
	 */
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	/* Bits being enabled must be a subset of the bits being updated. */
	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
497

D
Daniel Vetter 已提交
498
/*
 * Enable pipestat interrupts on @pipe: set @enable_mask (enable bits,
 * upper half of PIPESTAT) and @status_mask (status bits, lower half),
 * updating the cached pipestat_irq_mask. Caller must hold irq_lock.
 */
static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	/* Read only the enable half; status bits are write-1-to-clear. */
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	/* Nothing to do if all requested enable bits are already set. */
	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

D
Daniel Vetter 已提交
525
/*
 * Disable pipestat interrupts on @pipe: clear @enable_mask and drop
 * @status_mask from the cached pipestat_irq_mask. Caller must hold
 * irq_lock.
 */
static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		        u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	/* Read only the enable half; status bits are write-1-to-clear. */
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	/* Nothing to do if none of the requested bits are enabled. */
	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

551 552 553 554 555
/*
 * Derive the PIPESTAT enable-bit mask from a status-bit mask on VLV/CHV,
 * where a few enable bits do not sit exactly 16 bits above their status
 * bits. Returns 0 (with a one-time warning) for unsupported PSR bits.
 */
static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	/* Default: each enable bit is its status bit shifted up 16. */
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	/* These enable bits don't follow the <<16 rule; patch them in. */
	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

579 580 581 582 583 584
/*
 * Enable the pipestat interrupts named by @status_mask on @pipe,
 * computing the matching enable bits (platform dependent on VLV/CHV).
 * Caller must hold dev_priv->irq_lock.
 */
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/*
 * Disable the pipestat interrupts named by @status_mask on @pipe.
 * Caller must hold dev_priv->irq_lock.
 */
void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

607
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: driver private
 *
 * Enables the legacy backlight (BLC) event pipestat interrupts used by
 * the ACPI OpRegion ASLE handling. No-op without OpRegion ASLE support
 * or on non-mobile hardware.
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675
/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

676
static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
677 678 679 680 681
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

682 683 684
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
685
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
686
{
687
	struct drm_i915_private *dev_priv = dev->dev_private;
688
	i915_reg_t high_frame, low_frame;
689
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
690 691
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
692
	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
693

694 695 696 697 698
	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
699

700 701 702 703 704 705
	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

706 707
	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);
708

709 710 711 712 713 714
	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
715
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
716
		low   = I915_READ(low_frame);
717
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
718 719
	} while (high1 != high2);

720
	high1 >>= PIPE_FRAME_HIGH_SHIFT;
721
	pixel = low & PIPE_PIXEL_MASK;
722
	low >>= PIPE_FRAME_LOW_SHIFT;
723 724 725 726 727 728

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
729
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
730 731
}

732
/* Vblank counter for g4x+: read straight from the hardware register. */
static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

739
/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
/*
 * Return the current scanline of @crtc, adjusted by scanline_offset so
 * that 0 corresponds to the first line of vertical active. Uses raw
 * (_FW) register reads; intended to be called under uncore.lock from
 * timing-critical paths.
 */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->base.hwmode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev_priv))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank.  So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squak at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem.  We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		/* Poll briefly for a non-zero reading before trusting 0. */
		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
				DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

790
/*
 * DRM scanout-position callback: report the current scanout position of
 * @pipe in *vpos/*hpos, relative to vblank end (negative while in
 * vblank, per the DRM convention), with optional ktime stamps taken
 * immediately around the hardware query in *stime/*etime.
 *
 * Returns a DRM_SCANOUTPOS_* flag mask (0 if the pipe is disabled).
 */
static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime,
				    const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	/* Interlaced modes: work in field units rather than frame units. */
	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		/* Scanline-based query: no horizontal resolution. */
		*vpos = position;
		*hpos = 0;
	} else {
		/* Pixel-count-based query: split into line and column. */
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}

914 915 916 917 918 919 920 921 922 923 924 925 926
/*
 * Public wrapper around __intel_get_crtc_scanline() that takes
 * uncore.lock for the duration of the raw register reads.
 */
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

927
/*
 * i915_get_vblank_timestamp - compute a precise vblank timestamp for @pipe
 * @dev: drm device
 * @pipe: pipe to timestamp
 * @max_error: maximum allowed timestamp error (in/out)
 * @vblank_time: returned timestamp
 * @flags: caller flags, passed through to the DRM helper
 *
 * Returns -EINVAL for an invalid pipe/crtc, -EBUSY when the crtc is not
 * running, otherwise the result of drm_calc_vbltimestamp_from_scanoutpos().
 */
static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
			      int *max_error,
			      struct timeval *vblank_time,
			      unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* A crtc without a pixel clock is not scanning out; nothing to time. */
	if (!crtc->hwmode.crtc_clock) {
		DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     &crtc->hwmode);
}

957
static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
958
{
959
	u32 busy_up, busy_down, max_avg, min_avg;
960 961
	u8 new_delay;

962
	spin_lock(&mchdev_lock);
963

964 965
	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

966
	new_delay = dev_priv->ips.cur_delay;
967

968
	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
969 970
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
971 972 973 974
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
975
	if (busy_up > max_avg) {
976 977 978 979
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
980
	} else if (busy_down < min_avg) {
981 982 983 984
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
985 986
	}

987
	if (ironlake_set_drps(dev_priv, new_delay))
988
		dev_priv->ips.cur_delay = new_delay;
989

990
	spin_unlock(&mchdev_lock);
991

992 993 994
	return;
}

995
/*
 * Wake everyone sleeping on @engine's irq_queue after a user interrupt,
 * and bump the engine's user-interrupt counter. Called from the GT
 * interrupt handlers.
 */
static void notify_ring(struct intel_engine_cs *engine)
{
	/* Interrupts may fire before the engine is fully initialized. */
	if (!intel_engine_initialized(engine))
		return;

	trace_i915_gem_request_notify(engine);
	engine->user_interrupts++;

	wake_up_all(&engine->irq_queue);
}

1006 1007
/*
 * Snapshot the VLV/CHV residency counters (CZ timestamp plus render and
 * media C0 counts) into @ei, for later comparison by vlv_c0_above().
 */
static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}
1013

1014 1015 1016 1017 1018 1019
/*
 * Compare the C0 (busy) residency accumulated between snapshots @old and
 * @now against @threshold (scaled by the mul = 100 factor, i.e. treated as
 * a percentage of the elapsed CZ time). Returns false when @old has never
 * been initialized (cz_clock == 0).
 */
static bool vlv_c0_above(struct drm_i915_private *dev_priv,
			 const struct intel_rps_ei *old,
			 const struct intel_rps_ei *now,
			 int threshold)
{
	u64 time, c0;
	unsigned int mul = 100;

	if (old->cz_clock == 0)
		return false;

	/* Counters tick 256x faster when the high-range bit is set. */
	if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
		mul <<= 8;

	time = now->cz_clock - old->cz_clock;
	time *= threshold * dev_priv->czclk_freq;

	/* Workload can be split between render + media, e.g. SwapBuffers
	 * being blitted in X after being rendered in mesa. To account for
	 * this we need to combine both engines into our activity counter.
	 */
	c0 = now->render_c0 - old->render_c0;
	c0 += now->media_c0 - old->media_c0;
	c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;

	return c0 >= time;
}

1042
/*
 * Reset both evaluation-interval snapshots to "now" so the next EI-expired
 * interrupt evaluates residency from a fresh baseline.
 */
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
}
1047

1048 1049 1050 1051
/*
 * VLV/CHV workaround: on evaluation-interval-expired interrupts, measure
 * C0 residency in software and translate the result into the equivalent
 * UP/DOWN threshold event bits for gen6_pm_rps_work() to act on.
 * Returns the synthesized event mask (possibly 0).
 */
static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);
	if (now.cz_clock == 0)
		return 0;

	/* Not busy enough since the last down-EI? Request a down-clock. */
	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
		if (!vlv_c0_above(dev_priv,
				  &dev_priv->rps.down_ei, &now,
				  dev_priv->rps.down_threshold))
			events |= GEN6_PM_RP_DOWN_THRESHOLD;
		dev_priv->rps.down_ei = now;
	}

	/* Busier than the up threshold since the last up-EI? Up-clock. */
	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		if (vlv_c0_above(dev_priv,
				 &dev_priv->rps.up_ei, &now,
				 dev_priv->rps.up_threshold))
			events |= GEN6_PM_RP_UP_THRESHOLD;
		dev_priv->rps.up_ei = now;
	}

	return events;
}

1079 1080
static bool any_waiters(struct drm_i915_private *dev_priv)
{
1081
	struct intel_engine_cs *engine;
1082

1083
	for_each_engine(engine, dev_priv)
1084
		if (engine->irq_refcount)
1085 1086 1087 1088 1089
			return true;

	return false;
}

1090
/*
 * gen6_pm_rps_work - deferred RPS (GPU frequency) re-evaluation
 * @work: embedded in dev_priv->rps.work, queued by gen6_rps_irq_handler()
 *
 * Consumes the PM interrupt bits latched into rps.pm_iir, picks a new
 * frequency based on up/down threshold events, timeouts, waiters and
 * client boosts, and programs it via intel_set_rps().
 */
static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	bool client_boost;
	int new_delay, adj, min, max;
	u32 pm_iir;

	spin_lock_irq(&dev_priv->irq_lock);
	/* Speed up work cancelation during disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	/*
	 * The RPS work is synced during runtime suspend, we don't require a
	 * wakeref. TODO: instead of disabling the asserts make sure that we
	 * always hold an RPM reference while the work is running.
	 */
	DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);

	/* Latch and clear the accumulated events, then unmask PM irqs. */
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	client_boost = dev_priv->rps.client_boost;
	dev_priv->rps.client_boost = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		goto out;

	mutex_lock(&dev_priv->rps.hw_lock);

	/* VLV/CHV: synthesize threshold events from measured residency. */
	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = dev_priv->rps.last_adj;
	new_delay = dev_priv->rps.cur_freq;
	min = dev_priv->rps.min_freq_softlimit;
	max = dev_priv->rps.max_freq_softlimit;

	if (client_boost) {
		new_delay = dev_priv->rps.max_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		/* Accelerate the step size on consecutive up events. */
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq - adj) {
			new_delay = dev_priv->rps.efficient_freq;
			adj = 0;
		}
	} else if (any_waiters(dev_priv)) {
		/* Don't down-clock while someone is waiting on the GPU. */
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		/* Accelerate the step size on consecutive down events. */
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
	} else { /* unknown event */
		adj = 0;
	}

	dev_priv->rps.last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	intel_set_rps(dev_priv, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
out:
	ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
}

1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	/* which_slice is a bitmask of slices with pending errors; handle
	 * and clear one bit per iteration. */
	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;	/* ffs() is 1-based */
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		/* Ack the error and re-arm detection for this slice. */
		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		/* Tell userspace which row went bad so it can remap it. */
		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	/* Restore DOP clock gating to its previous state. */
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	/* Re-enable the parity interrupt disabled by the top half. */
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

1265 1266
/*
 * Top half for L3 parity errors: mask further parity interrupts, record
 * which slice(s) signalled in l3_parity.which_slice, and defer the
 * expensive register access and uevent to ivybridge_parity_work().
 */
static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
					       u32 iir)
{
	if (!HAS_L3_DPF(dev_priv))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev_priv);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

1285
/*
 * GT interrupt dispatch for the ILK generation: render and bsd user
 * interrupts wake their respective engines' waiters.
 */
static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(&dev_priv->engine[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[VCS]);
}

1295
/*
 * GT interrupt dispatch for SNB-era hardware (also used by VLV): user
 * interrupts for the render/bsd/blt rings, plus command-parser error
 * logging and L3 parity error handling.
 */
static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(&dev_priv->engine[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[BCS]);

	/* Command streamer errors are only logged, not recovered from. */
	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev_priv))
		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
}

1316
/*
 * Per-engine gen8+ interrupt dispatch. @test_shift selects the engine's
 * field within the shared IIR dword: user interrupts wake request
 * waiters, context-switch interrupts schedule the engine's irq tasklet.
 */
static __always_inline void
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
{
	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
		notify_ring(engine);
	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
		tasklet_schedule(&engine->irq_tasklet);
}

1325 1326 1327
/*
 * Read and ack (clear) the gen8 GT IIR registers that @master_ctl flags
 * as pending, stashing the raw values in @gt_iir[] for later processing
 * by gen8_gt_irq_handler(). Uses the raw *_FW mmio accessors —
 * NOTE(review): presumably the caller guarantees forcewake/uncore.lock;
 * confirm against the gen8 top-level handlers.
 *
 * Returns IRQ_HANDLED if any IIR had bits set, IRQ_NONE otherwise.
 */
static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
				   u32 master_ctl,
				   u32 gt_iir[4])
{
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
		if (gt_iir[0]) {
			I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
		if (gt_iir[1]) {
			I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
		if (gt_iir[3]) {
			I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
		if (gt_iir[2] & dev_priv->pm_rps_events) {
			/* Only ack the RPS event bits; other PM bits stay. */
			I915_WRITE_FW(GEN8_GT_IIR(2),
				      gt_iir[2] & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	return ret;
}

1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395
/*
 * Second stage of gen8 GT interrupt handling: dispatch the IIR values
 * previously collected by gen8_gt_irq_ack() to the per-engine handlers
 * and the RPS handler.
 */
static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
				u32 gt_iir[4])
{
	if (gt_iir[0]) {
		gen8_cs_irq_handler(&dev_priv->engine[RCS],
				    gt_iir[0], GEN8_RCS_IRQ_SHIFT);
		gen8_cs_irq_handler(&dev_priv->engine[BCS],
				    gt_iir[0], GEN8_BCS_IRQ_SHIFT);
	}

	if (gt_iir[1]) {
		gen8_cs_irq_handler(&dev_priv->engine[VCS],
				    gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
		gen8_cs_irq_handler(&dev_priv->engine[VCS2],
				    gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
	}

	if (gt_iir[3])
		gen8_cs_irq_handler(&dev_priv->engine[VECS],
				    gt_iir[3], GEN8_VECS_IRQ_SHIFT);

	if (gt_iir[2] & dev_priv->pm_rps_events)
		gen6_rps_irq_handler(dev_priv, gt_iir[2]);
}

1396 1397 1398 1399
/*
 * Decode whether a BXT hotplug event on @port was a long pulse.
 * Ports without a long-detect bit report false.
 */
static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
	u32 mask;

	if (port == PORT_A)
		mask = PORTA_HOTPLUG_LONG_DETECT;
	else if (port == PORT_B)
		mask = PORTB_HOTPLUG_LONG_DETECT;
	else if (port == PORT_C)
		mask = PORTC_HOTPLUG_LONG_DETECT;
	else
		return false;

	return (val & mask) != 0;
}

1410 1411 1412 1413 1414 1415 1416 1417 1418 1419
/*
 * Decode whether an SPT hotplug2 event on @port was a long pulse.
 * Only port E lives in the hotplug2 register.
 */
static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
{
	if (port != PORT_E)
		return false;

	return (val & PORTE_HOTPLUG_LONG_DETECT) != 0;
}

1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435
/*
 * Decode whether an SPT hotplug event on @port was a long pulse.
 * Ports A-D are covered; anything else reports false.
 */
static bool spt_port_hotplug_long_detect(enum port port, u32 val)
{
	u32 mask;

	if (port == PORT_A)
		mask = PORTA_HOTPLUG_LONG_DETECT;
	else if (port == PORT_B)
		mask = PORTB_HOTPLUG_LONG_DETECT;
	else if (port == PORT_C)
		mask = PORTC_HOTPLUG_LONG_DETECT;
	else if (port == PORT_D)
		mask = PORTD_HOTPLUG_LONG_DETECT;
	else
		return false;

	return (val & mask) != 0;
}

1436 1437 1438 1439 1440 1441 1442 1443 1444 1445
/*
 * Decode whether an ILK digital-port-A hotplug event was a long pulse.
 * Only port A is reported through this register.
 */
static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
{
	if (port != PORT_A)
		return false;

	return (val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT) != 0;
}

1446
/*
 * Decode whether a PCH hotplug event on @port was a long pulse.
 * Ports B-D are covered; anything else reports false.
 */
static bool pch_port_hotplug_long_detect(enum port port, u32 val)
{
	u32 mask;

	if (port == PORT_B)
		mask = PORTB_HOTPLUG_LONG_DETECT;
	else if (port == PORT_C)
		mask = PORTC_HOTPLUG_LONG_DETECT;
	else if (port == PORT_D)
		mask = PORTD_HOTPLUG_LONG_DETECT;
	else
		return false;

	return (val & mask) != 0;
}

1460
/*
 * Decode whether a gmch-era hotplug event on @port was a long pulse.
 * Ports B-D are covered; anything else reports false.
 */
static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
{
	u32 mask;

	if (port == PORT_B)
		mask = PORTB_HOTPLUG_INT_LONG_PULSE;
	else if (port == PORT_C)
		mask = PORTC_HOTPLUG_INT_LONG_PULSE;
	else if (port == PORT_D)
		mask = PORTD_HOTPLUG_INT_LONG_PULSE;
	else
		return false;

	return (val & mask) != 0;
}

1474 1475 1476 1477 1478 1479 1480
/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
1481
static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
			     u32 hotplug_trigger, u32 dig_hotplug_reg,
			     const u32 hpd[HPD_NUM_PINS],
			     bool long_pulse_detect(enum port port, u32 val))
{
	enum port port;
	int i;

	for_each_hpd_pin(i) {
		/* Skip pins whose trigger bit did not fire. */
		if ((hpd[i] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(i);

		/* Pins without a port mapping can't be long/short classified. */
		if (!intel_hpd_pin_to_port(i, &port))
			continue;

		if (long_pulse_detect(port, dig_hotplug_reg))
			*long_mask |= BIT(i);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask);

}

1507
/* GMBUS event: wake anyone sleeping on the gmbus wait queue. */
static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

1512
/*
 * DP AUX transaction done: wakes the same queue as GMBUS — AUX waiters
 * share gmbus_wait_queue.
 */
static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

1517
#if defined(CONFIG_DEBUG_FS)
/*
 * Push one set of CRC results into the pipe's circular capture buffer and
 * wake any debugfs reader. Compiled to an empty stub without
 * CONFIG_DEBUG_FS (see the #else branch below).
 */
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	/* entries == NULL means CRC capture isn't armed for this pipe. */
	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_DEBUG_KMS("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	/* Drop the sample rather than overwrite unread entries. */
	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev_priv->dev->driver->get_vblank_counter(dev_priv->dev,
								 pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	/* Ring size is a power of two, hence the mask-based wraparound. */
	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif

1571

1572 1573
/*
 * HSW+ CRC interrupt: only one result register is read; the remaining
 * four CRC slots are reported as zero.
 */
static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

1580 1581
/* IVB CRC interrupt: all five result registers are read and forwarded. */
static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}
1590

1591 1592
static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
1593
{
1594 1595
	uint32_t res1, res2;

1596
	if (INTEL_GEN(dev_priv) >= 3)
1597 1598 1599 1600
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

1601
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1602 1603 1604
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;
1605

1606
	display_pipe_crc_irq_handler(dev_priv, pipe,
1607 1608 1609 1610
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
1611
}
1612

1613 1614 1615 1616
/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		/* Mask the RPS bits in PMIMR until gen6_pm_rps_work runs. */
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		/* interrupts_enabled == false means RPS irq teardown is in
		 * progress; drop the events instead of queueing work. */
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			queue_work(dev_priv->wq, &dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	/* On gen8+ the VEBOX bits below don't live in this register. */
	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

1640
/*
 * Forward a vblank event to the DRM core; when the core accepts it
 * (returns true), also complete any pending mmio page flip on @pipe.
 */
static void intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	if (drm_handle_vblank(dev_priv->dev, pipe))
		intel_finish_page_flip_mmio(dev_priv, pipe);
}

1647 1648
/*
 * First-stage PIPESTAT handling for VLV/CHV: under irq_lock, snapshot the
 * interesting per-pipe status bits into @pipe_stats[] and write-clear them
 * in hardware. The decoded events are processed later, outside the lock,
 * by valleyview_pipestat_irq_handler().
 */
static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
					u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	int pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filterered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);
}

1705
/*
 * Second-stage PIPESTAT handling for VLV/CHV: act on the status bits
 * collected by valleyview_pipestat_irq_ack() — vblanks, CS flip
 * completions, CRC results, underruns and GMBUS events.
 */
static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_pipe_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
			intel_finish_page_flip_cs(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	/* GMBUS status is reported via pipe A's PIPESTAT. */
	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

1728
/*
 * Read PORT_HOTPLUG_STAT and write the value back to ack (clear) the
 * latched hotplug status. Returns the raw status for later decoding by
 * i9xx_hpd_irq_handler().
 */
static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (hotplug_status)
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);

	return hotplug_status;
}

1738
/*
 * Decode a previously-acked PORT_HOTPLUG_STAT value into HPD pin events.
 * G4X/VLV/CHV additionally report DP AUX completion through this register
 * and use a different status bit layout than the older i915 platforms.
 */
static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev_priv);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}
	}
}

1769
/*
 * Primary interrupt handler for VLV. The register write ordering inside
 * the loop body is load-bearing — see the "theory on interrupt
 * generation" comment below before reordering anything.
 */
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	/* Single-pass do/while(0): break is used as a structured early exit. */
	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);
		iir = I915_READ(VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		I915_WRITE(VLV_MASTER_IER, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		/* Restore interrupt enables before processing the events. */
		I915_WRITE(VLV_IER, ier);
		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
		POSTING_READ(VLV_MASTER_IER);

		if (gt_iir)
			snb_gt_irq_handler(dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

1852 1853
/*
 * Primary interrupt handler for CHV. Same structure as the VLV handler,
 * but with the gen8 master-control register and split GT IIRs. The write
 * ordering inside the loop is load-bearing — see the "theory" comment.
 */
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	/* Single-pass do/while(0): break is used as a structured early exit. */
	do {
		u32 master_ctl, iir;
		u32 gt_iir[4] = {};
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		I915_WRITE(GEN8_MASTER_IRQ, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		/* Restore interrupt enables before processing the events. */
		I915_WRITE(VLV_IER, ier);
		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);

		gen8_gt_irq_handler(dev_priv, gt_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

1929 1930
static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
1931 1932 1933 1934
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

1935 1936 1937 1938 1939 1940
	/*
	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
	 * unless we touch the hotplug register, even if hotplug_trigger is
	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
	 * errors.
	 */
1941
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1942 1943 1944 1945 1946 1947 1948 1949
	if (!hotplug_trigger) {
		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
			PORTD_HOTPLUG_STATUS_MASK |
			PORTC_HOTPLUG_STATUS_MASK |
			PORTB_HOTPLUG_STATUS_MASK;
		dig_hotplug_reg &= ~mask;
	}

1950
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1951 1952
	if (!hotplug_trigger)
		return;
1953 1954 1955 1956 1957

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   pch_port_hotplug_long_detect);

1958
	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1959 1960
}

1961
static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1962
{
1963
	int pipe;
1964
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1965

1966
	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
1967

1968 1969 1970
	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
1971
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
1972 1973
				 port_name(port));
	}
1974

1975
	if (pch_iir & SDE_AUX_MASK)
1976
		dp_aux_irq_handler(dev_priv);
1977

1978
	if (pch_iir & SDE_GMBUS)
1979
		gmbus_irq_handler(dev_priv);
1980 1981 1982 1983 1984 1985 1986 1987 1988 1989

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

1990
	if (pch_iir & SDE_FDI_MASK)
1991
		for_each_pipe(dev_priv, pipe)
1992 1993 1994
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));
1995 1996 1997 1998 1999 2000 2001 2002

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
2003
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
2004 2005

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2006
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
2007 2008
}

2009
static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
2010 2011
{
	u32 err_int = I915_READ(GEN7_ERR_INT);
D
Daniel Vetter 已提交
2012
	enum pipe pipe;
2013

2014 2015 2016
	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

2017
	for_each_pipe(dev_priv, pipe) {
2018 2019
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2020

D
Daniel Vetter 已提交
2021
		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
2022 2023
			if (IS_IVYBRIDGE(dev_priv))
				ivb_pipe_crc_irq_handler(dev_priv, pipe);
D
Daniel Vetter 已提交
2024
			else
2025
				hsw_pipe_crc_irq_handler(dev_priv, pipe);
D
Daniel Vetter 已提交
2026 2027
		}
	}
2028

2029 2030 2031
	I915_WRITE(GEN7_ERR_INT, err_int);
}

2032
static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
2033 2034 2035
{
	u32 serr_int = I915_READ(SERR_INT);

2036 2037 2038
	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

2039
	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
2040
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
2041 2042

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
2043
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
2044 2045

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
2046
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
2047 2048

	I915_WRITE(SERR_INT, serr_int);
2049 2050
}

2051
static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2052 2053
{
	int pipe;
2054
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2055

2056
	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
2057

2058 2059 2060 2061 2062 2063
	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}
2064 2065

	if (pch_iir & SDE_AUX_MASK_CPT)
2066
		dp_aux_irq_handler(dev_priv);
2067 2068

	if (pch_iir & SDE_GMBUS_CPT)
2069
		gmbus_irq_handler(dev_priv);
2070 2071 2072 2073 2074 2075 2076 2077

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
2078
		for_each_pipe(dev_priv, pipe)
2079 2080 2081
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));
2082 2083

	if (pch_iir & SDE_ERROR_CPT)
2084
		cpt_serr_int_handler(dev_priv);
2085 2086
}

2087
static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101
{
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
	u32 pin_mask = 0, long_mask = 0;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   dig_hotplug_reg, hpd_spt,
2102
				   spt_port_hotplug_long_detect);
2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug2_long_detect);
	}

	if (pin_mask)
2117
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2118 2119

	if (pch_iir & SDE_GMBUS_CPT)
2120
		gmbus_irq_handler(dev_priv);
2121 2122
}

2123 2124
static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   ilk_port_hotplug_long_detect);

2136
	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2137 2138
}

2139 2140
static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
2141
{
2142
	enum pipe pipe;
2143 2144
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

2145
	if (hotplug_trigger)
2146
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
2147 2148

	if (de_iir & DE_AUX_CHANNEL_A)
2149
		dp_aux_irq_handler(dev_priv);
2150 2151

	if (de_iir & DE_GSE)
2152
		intel_opregion_asle_intr(dev_priv);
2153 2154 2155 2156

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

2157
	for_each_pipe(dev_priv, pipe) {
2158 2159
		if (de_iir & DE_PIPE_VBLANK(pipe))
			intel_pipe_handle_vblank(dev_priv, pipe);
2160

2161
		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2162
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2163

2164
		if (de_iir & DE_PIPE_CRC_DONE(pipe))
2165
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2166

2167
		/* plane/pipes map 1:1 on ilk+ */
2168
		if (de_iir & DE_PLANE_FLIP_DONE(pipe))
2169
			intel_finish_page_flip_cs(dev_priv, pipe);
2170 2171 2172 2173 2174 2175
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

2176 2177
		if (HAS_PCH_CPT(dev_priv))
			cpt_irq_handler(dev_priv, pch_iir);
2178
		else
2179
			ibx_irq_handler(dev_priv, pch_iir);
2180 2181 2182 2183 2184

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

2185 2186
	if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev_priv);
2187 2188
}

2189 2190
static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
2191
{
2192
	enum pipe pipe;
2193 2194
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

2195
	if (hotplug_trigger)
2196
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
2197 2198

	if (de_iir & DE_ERR_INT_IVB)
2199
		ivb_err_int_handler(dev_priv);
2200 2201

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
2202
		dp_aux_irq_handler(dev_priv);
2203 2204

	if (de_iir & DE_GSE_IVB)
2205
		intel_opregion_asle_intr(dev_priv);
2206

2207
	for_each_pipe(dev_priv, pipe) {
2208 2209
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
			intel_pipe_handle_vblank(dev_priv, pipe);
2210 2211

		/* plane/pipes map 1:1 on ilk+ */
2212
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
2213
			intel_finish_page_flip_cs(dev_priv, pipe);
2214 2215 2216
	}

	/* check event from PCH */
2217
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2218 2219
		u32 pch_iir = I915_READ(SDEIIR);

2220
		cpt_irq_handler(dev_priv, pch_iir);
2221 2222 2223 2224 2225 2226

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

2227 2228 2229 2230 2231 2232 2233 2234
/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
2235
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2236
{
2237
	struct drm_device *dev = arg;
2238
	struct drm_i915_private *dev_priv = dev->dev_private;
2239
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2240
	irqreturn_t ret = IRQ_NONE;
2241

2242 2243 2244
	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

2245 2246 2247
	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

2248 2249 2250
	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2251
	POSTING_READ(DEIER);
2252

2253 2254 2255 2256 2257
	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
2258
	if (!HAS_PCH_NOP(dev_priv)) {
2259 2260 2261 2262
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}
2263

2264 2265
	/* Find, clear, then process each source of interrupt */

2266
	gt_iir = I915_READ(GTIIR);
2267
	if (gt_iir) {
2268 2269
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
2270
		if (INTEL_GEN(dev_priv) >= 6)
2271
			snb_gt_irq_handler(dev_priv, gt_iir);
2272
		else
2273
			ilk_gt_irq_handler(dev_priv, gt_iir);
2274 2275
	}

2276 2277
	de_iir = I915_READ(DEIIR);
	if (de_iir) {
2278 2279
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
2280 2281
		if (INTEL_GEN(dev_priv) >= 7)
			ivb_display_irq_handler(dev_priv, de_iir);
2282
		else
2283
			ilk_display_irq_handler(dev_priv, de_iir);
2284 2285
	}

2286
	if (INTEL_GEN(dev_priv) >= 6) {
2287 2288 2289 2290
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
2291
			gen6_rps_irq_handler(dev_priv, pm_iir);
2292
		}
2293
	}
2294 2295 2296

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
2297
	if (!HAS_PCH_NOP(dev_priv)) {
2298 2299 2300
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}
2301

2302 2303 2304
	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	enable_rpm_wakeref_asserts(dev_priv);

2305 2306 2307
	return ret;
}

2308 2309
static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
2310
				const u32 hpd[HPD_NUM_PINS])
2311
{
2312
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2313

2314 2315
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2316

2317
	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2318
			   dig_hotplug_reg, hpd,
2319
			   bxt_port_hotplug_long_detect);
2320

2321
	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2322 2323
}

2324 2325
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2326 2327
{
	irqreturn_t ret = IRQ_NONE;
2328
	u32 iir;
2329
	enum pipe pipe;
J
Jesse Barnes 已提交
2330

2331
	if (master_ctl & GEN8_DE_MISC_IRQ) {
2332 2333 2334
		iir = I915_READ(GEN8_DE_MISC_IIR);
		if (iir) {
			I915_WRITE(GEN8_DE_MISC_IIR, iir);
2335
			ret = IRQ_HANDLED;
2336
			if (iir & GEN8_DE_MISC_GSE)
2337
				intel_opregion_asle_intr(dev_priv);
2338 2339
			else
				DRM_ERROR("Unexpected DE Misc interrupt\n");
2340
		}
2341 2342
		else
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2343 2344
	}

2345
	if (master_ctl & GEN8_DE_PORT_IRQ) {
2346 2347 2348
		iir = I915_READ(GEN8_DE_PORT_IIR);
		if (iir) {
			u32 tmp_mask;
2349
			bool found = false;
2350

2351
			I915_WRITE(GEN8_DE_PORT_IIR, iir);
2352
			ret = IRQ_HANDLED;
J
Jesse Barnes 已提交
2353

2354 2355 2356 2357 2358 2359 2360
			tmp_mask = GEN8_AUX_CHANNEL_A;
			if (INTEL_INFO(dev_priv)->gen >= 9)
				tmp_mask |= GEN9_AUX_CHANNEL_B |
					    GEN9_AUX_CHANNEL_C |
					    GEN9_AUX_CHANNEL_D;

			if (iir & tmp_mask) {
2361
				dp_aux_irq_handler(dev_priv);
2362 2363 2364
				found = true;
			}

2365 2366 2367
			if (IS_BROXTON(dev_priv)) {
				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
				if (tmp_mask) {
2368 2369
					bxt_hpd_irq_handler(dev_priv, tmp_mask,
							    hpd_bxt);
2370 2371 2372 2373 2374
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
				if (tmp_mask) {
2375 2376
					ilk_hpd_irq_handler(dev_priv,
							    tmp_mask, hpd_bdw);
2377 2378
					found = true;
				}
2379 2380
			}

2381 2382
			if (IS_BROXTON(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
				gmbus_irq_handler(dev_priv);
S
Shashank Sharma 已提交
2383 2384 2385
				found = true;
			}

2386
			if (!found)
2387
				DRM_ERROR("Unexpected DE Port interrupt\n");
2388
		}
2389 2390
		else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2391 2392
	}

2393
	for_each_pipe(dev_priv, pipe) {
2394
		u32 flip_done, fault_errors;
2395

2396 2397
		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;
2398

2399 2400 2401 2402 2403
		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
			continue;
		}
2404

2405 2406
		ret = IRQ_HANDLED;
		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2407

2408 2409
		if (iir & GEN8_PIPE_VBLANK)
			intel_pipe_handle_vblank(dev_priv, pipe);
2410

2411 2412 2413 2414 2415
		flip_done = iir;
		if (INTEL_INFO(dev_priv)->gen >= 9)
			flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
		else
			flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
2416

2417
		if (flip_done)
2418
			intel_finish_page_flip_cs(dev_priv, pipe);
2419

2420
		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2421
			hsw_pipe_crc_irq_handler(dev_priv, pipe);
2422

2423 2424
		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2425

2426 2427 2428 2429 2430
		fault_errors = iir;
		if (INTEL_INFO(dev_priv)->gen >= 9)
			fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		else
			fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2431

2432 2433 2434 2435
		if (fault_errors)
			DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
				  pipe_name(pipe),
				  fault_errors);
2436 2437
	}

2438
	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2439
	    master_ctl & GEN8_DE_PCH_IRQ) {
2440 2441 2442 2443 2444
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
2445 2446 2447
		iir = I915_READ(SDEIIR);
		if (iir) {
			I915_WRITE(SDEIIR, iir);
2448
			ret = IRQ_HANDLED;
2449 2450

			if (HAS_PCH_SPT(dev_priv))
2451
				spt_irq_handler(dev_priv, iir);
2452
			else
2453
				cpt_irq_handler(dev_priv, iir);
2454 2455 2456 2457 2458 2459 2460
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
		}
2461 2462
	}

2463 2464 2465 2466 2467 2468 2469 2470
	return ret;
}

/* Top-level gen8+ interrupt handler: disable the master interrupt, ack and
 * handle GT then display engine sources, then re-enable the master bit. */
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl;
	u32 gt_iir[4] = {};
	irqreturn_t ret;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
	if (!master_ctl)
		return IRQ_NONE;

	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	/* Find, clear, then process each source of interrupt */
	ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
	gen8_gt_irq_handler(dev_priv, gt_iir);
	ret |= gen8_de_irq_handler(dev_priv, master_ctl);

	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ_FW(GEN8_MASTER_IRQ);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

2500 2501 2502
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
			       bool reset_completed)
{
2503
	struct intel_engine_cs *engine;
2504 2505 2506 2507 2508 2509 2510 2511 2512

	/*
	 * Notify all waiters for GPU completion events that reset state has
	 * been changed, and that they need to restart their wait after
	 * checking for potential errors (and bail out to drop locks if there is
	 * a gpu reset pending so that i915_error_work_func can acquire them).
	 */

	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2513
	for_each_engine(engine, dev_priv)
2514
		wake_up_all(&engine->irq_queue);
2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526

	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
	wake_up_all(&dev_priv->pending_flip_queue);

	/*
	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
	 * reset state is cleared.
	 */
	if (reset_completed)
		wake_up_all(&dev_priv->gpu_error.reset_queue);
}

2527
/**
2528
 * i915_reset_and_wakeup - do process context error handling work
2529
 * @dev: drm device
2530 2531 2532 2533
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
2534
static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
2535
{
2536
	struct kobject *kobj = &dev_priv->dev->primary->kdev->kobj;
2537 2538 2539
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2540
	int ret;
2541

2542
	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
2543

2544 2545 2546 2547 2548 2549 2550 2551 2552 2553
	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
2554
	if (i915_reset_in_progress(&dev_priv->gpu_error)) {
2555
		DRM_DEBUG_DRIVER("resetting chip\n");
2556
		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
2557

2558 2559 2560 2561 2562 2563 2564 2565
		/*
		 * In most cases it's guaranteed that we get here with an RPM
		 * reference held, for example because there is a pending GPU
		 * request that won't finish until the reset is done. This
		 * isn't the case at least when we get here by doing a
		 * simulated reset via debugs, so get an RPM reference.
		 */
		intel_runtime_pm_get(dev_priv);
2566

2567
		intel_prepare_reset(dev_priv);
2568

2569 2570 2571 2572 2573 2574
		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
2575
		ret = i915_reset(dev_priv);
2576

2577
		intel_finish_reset(dev_priv);
2578

2579 2580
		intel_runtime_pm_put(dev_priv);

2581
		if (ret == 0)
2582
			kobject_uevent_env(kobj,
2583
					   KOBJ_CHANGE, reset_done_event);
2584

2585 2586 2587 2588 2589
		/*
		 * Note: The wake_up also serves as a memory barrier so that
		 * waiters see the update value of the reset counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, true);
2590
	}
2591 2592
}

2593
static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
2594
{
2595
	uint32_t instdone[I915_NUM_INSTDONE_REG];
2596
	u32 eir = I915_READ(EIR);
2597
	int pipe, i;
2598

2599 2600
	if (!eir)
		return;
2601

2602
	pr_err("render error detected, EIR: 0x%08x\n", eir);
2603

2604
	i915_get_extra_instdone(dev_priv, instdone);
2605

2606
	if (IS_G4X(dev_priv)) {
2607 2608 2609
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

2610 2611
			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2612 2613
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2614 2615
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2616
			I915_WRITE(IPEIR_I965, ipeir);
2617
			POSTING_READ(IPEIR_I965);
2618 2619 2620
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
2621 2622
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2623
			I915_WRITE(PGTBL_ER, pgtbl_err);
2624
			POSTING_READ(PGTBL_ER);
2625 2626 2627
		}
	}

2628
	if (!IS_GEN2(dev_priv)) {
2629 2630
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
2631 2632
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2633
			I915_WRITE(PGTBL_ER, pgtbl_err);
2634
			POSTING_READ(PGTBL_ER);
2635 2636 2637 2638
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
2639
		pr_err("memory refresh error:\n");
2640
		for_each_pipe(dev_priv, pipe)
2641
			pr_err("pipe %c stat: 0x%08x\n",
2642
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2643 2644 2645
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
2646 2647
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
2648 2649
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2650
		if (INTEL_GEN(dev_priv) < 4) {
2651 2652
			u32 ipeir = I915_READ(IPEIR);

2653 2654 2655
			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
2656
			I915_WRITE(IPEIR, ipeir);
2657
			POSTING_READ(IPEIR);
2658 2659 2660
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

2661 2662 2663 2664
			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2665
			I915_WRITE(IPEIR_I965, ipeir);
2666
			POSTING_READ(IPEIR_I965);
2667 2668 2669 2670
		}
	}

	I915_WRITE(EIR, eir);
2671
	POSTING_READ(EIR);
2672 2673 2674 2675 2676 2677 2678 2679 2680 2681
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
2682 2683 2684
}

/**
2685
 * i915_handle_error - handle a gpu error
2686
 * @dev: drm device
2687
 * @engine_mask: mask representing engines that are hung
2688
 * Do some basic checking of register state at error time and
2689 2690 2691 2692 2693
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
2694 2695
void i915_handle_error(struct drm_i915_private *dev_priv,
		       u32 engine_mask,
2696
		       const char *fmt, ...)
2697
{
2698 2699
	va_list args;
	char error_msg[80];
2700

2701 2702 2703 2704
	va_start(args, fmt);
	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
	va_end(args);

2705 2706
	i915_capture_error_state(dev_priv, engine_mask, error_msg);
	i915_report_and_clear_eir(dev_priv);
2707

2708
	if (engine_mask) {
2709
		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
2710
				&dev_priv->gpu_error.reset_counter);
2711

2712
		/*
2713 2714 2715
		 * Wakeup waiting processes so that the reset function
		 * i915_reset_and_wakeup doesn't deadlock trying to grab
		 * various locks. By bumping the reset counter first, the woken
2716 2717 2718 2719 2720 2721 2722 2723
		 * processes will see a reset in progress and back off,
		 * releasing their locks and then wait for the reset completion.
		 * We must do this for _all_ gpu waiters that might hold locks
		 * that the reset work needs to acquire.
		 *
		 * Note: The wake_up serves as the required memory barrier to
		 * ensure that the waiters see the updated value of the reset
		 * counter atomic_t.
2724
		 */
2725
		i915_error_wake_up(dev_priv, false);
2726 2727
	}

2728
	i915_reset_and_wakeup(dev_priv);
2729 2730
}

2731 2732 2733
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
2734
static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
2735
{
2736
	struct drm_i915_private *dev_priv = dev->dev_private;
2737
	unsigned long irqflags;
2738

2739
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2740
	if (INTEL_INFO(dev)->gen >= 4)
2741
		i915_enable_pipestat(dev_priv, pipe,
2742
				     PIPE_START_VBLANK_INTERRUPT_STATUS);
2743
	else
2744
		i915_enable_pipestat(dev_priv, pipe,
2745
				     PIPE_VBLANK_INTERRUPT_STATUS);
2746
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2747

2748 2749 2750
	return 0;
}

2751
static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
2752
{
2753
	struct drm_i915_private *dev_priv = dev->dev_private;
2754
	unsigned long irqflags;
2755
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2756
						     DE_PIPE_VBLANK(pipe);
2757 2758

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2759
	ilk_enable_display_irq(dev_priv, bit);
2760 2761 2762 2763 2764
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

2765
static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
J
Jesse Barnes 已提交
2766
{
2767
	struct drm_i915_private *dev_priv = dev->dev_private;
J
Jesse Barnes 已提交
2768 2769 2770
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2771
	i915_enable_pipestat(dev_priv, pipe,
2772
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
J
Jesse Barnes 已提交
2773 2774 2775 2776 2777
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

2778
static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2779 2780 2781 2782 2783
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2784
	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2785
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2786

2787 2788 2789
	return 0;
}

2790 2791 2792
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
2793
static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
2794
{
2795
	struct drm_i915_private *dev_priv = dev->dev_private;
2796
	unsigned long irqflags;
2797

2798
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2799
	i915_disable_pipestat(dev_priv, pipe,
2800 2801
			      PIPE_VBLANK_INTERRUPT_STATUS |
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2802 2803 2804
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

2805
static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
2806
{
2807
	struct drm_i915_private *dev_priv = dev->dev_private;
2808
	unsigned long irqflags;
2809
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2810
						     DE_PIPE_VBLANK(pipe);
2811 2812

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2813
	ilk_disable_display_irq(dev_priv, bit);
2814 2815 2816
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

2817
static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
J
Jesse Barnes 已提交
2818
{
2819
	struct drm_i915_private *dev_priv = dev->dev_private;
J
Jesse Barnes 已提交
2820 2821 2822
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2823
	i915_disable_pipestat(dev_priv, pipe,
2824
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
J
Jesse Barnes 已提交
2825 2826 2827
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

2828
static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
2829 2830 2831 2832 2833
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2834
	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2835 2836 2837
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

2838
static bool
2839
ring_idle(struct intel_engine_cs *engine, u32 seqno)
2840
{
2841 2842
	return i915_seqno_passed(seqno,
				 READ_ONCE(engine->last_submitted_seqno));
B
Ben Gamari 已提交
2843 2844
}

2845
static bool
2846
ipehr_is_semaphore_wait(struct drm_i915_private *dev_priv, u32 ipehr)
2847
{
2848
	if (INTEL_GEN(dev_priv) >= 8) {
2849
		return (ipehr >> 23) == 0x1c;
2850 2851 2852 2853 2854 2855 2856
	} else {
		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
				 MI_SEMAPHORE_REGISTER);
	}
}

2857
static struct intel_engine_cs *
2858 2859
semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
				 u64 offset)
2860
{
2861
	struct drm_i915_private *dev_priv = engine->i915;
2862
	struct intel_engine_cs *signaller;
2863

2864
	if (INTEL_GEN(dev_priv) >= 8) {
2865
		for_each_engine(signaller, dev_priv) {
2866
			if (engine == signaller)
2867 2868
				continue;

2869
			if (offset == signaller->semaphore.signal_ggtt[engine->id])
2870 2871
				return signaller;
		}
2872 2873 2874
	} else {
		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;

2875
		for_each_engine(signaller, dev_priv) {
2876
			if(engine == signaller)
2877 2878
				continue;

2879
			if (sync_bits == signaller->semaphore.mbox.wait[engine->id])
2880 2881 2882 2883
				return signaller;
		}
	}

2884
	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2885
		  engine->id, ipehr, offset);
2886 2887 2888 2889

	return NULL;
}

2890
static struct intel_engine_cs *
2891
semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
2892
{
2893
	struct drm_i915_private *dev_priv = engine->i915;
2894
	u32 cmd, ipehr, head;
2895 2896
	u64 offset = 0;
	int i, backwards;
2897

2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914
	/*
	 * This function does not support execlist mode - any attempt to
	 * proceed further into this function will result in a kernel panic
	 * when dereferencing ring->buffer, which is not set up in execlist
	 * mode.
	 *
	 * The correct way of doing it would be to derive the currently
	 * executing ring buffer from the current context, which is derived
	 * from the currently running request. Unfortunately, to get the
	 * current request we would have to grab the struct_mutex before doing
	 * anything else, which would be ill-advised since some other thread
	 * might have grabbed it already and managed to hang itself, causing
	 * the hang checker to deadlock.
	 *
	 * Therefore, this function does not support execlist mode in its
	 * current form. Just return NULL and move on.
	 */
2915
	if (engine->buffer == NULL)
2916 2917
		return NULL;

2918
	ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
2919
	if (!ipehr_is_semaphore_wait(engine->i915, ipehr))
2920
		return NULL;
2921

2922 2923 2924
	/*
	 * HEAD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX. But limit it to just 3
2925 2926
	 * or 4 dwords depending on the semaphore wait command size.
	 * Note that we don't care about ACTHD here since that might
2927 2928
	 * point at at batch, and semaphores are always emitted into the
	 * ringbuffer itself.
2929
	 */
2930
	head = I915_READ_HEAD(engine) & HEAD_ADDR;
2931
	backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
2932

2933
	for (i = backwards; i; --i) {
2934 2935 2936 2937 2938
		/*
		 * Be paranoid and presume the hw has gone off into the wild -
		 * our ring is smaller than what the hardware (and hence
		 * HEAD_ADDR) allows. Also handles wrap-around.
		 */
2939
		head &= engine->buffer->size - 1;
2940 2941

		/* This here seems to blow up */
2942
		cmd = ioread32(engine->buffer->virtual_start + head);
2943 2944 2945
		if (cmd == ipehr)
			break;

2946 2947
		head -= 4;
	}
2948

2949 2950
	if (!i)
		return NULL;
2951

2952
	*seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
2953
	if (INTEL_GEN(dev_priv) >= 8) {
2954
		offset = ioread32(engine->buffer->virtual_start + head + 12);
2955
		offset <<= 32;
2956
		offset = ioread32(engine->buffer->virtual_start + head + 8);
2957
	}
2958
	return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
2959 2960
}

2961
/*
 * Check whether the semaphore this engine waits on has been (or will be)
 * signalled.  Returns 1 if the awaited seqno has passed, 0 if the signaller
 * is still making progress, -1 on deadlock or if no signaller can be found.
 * Recurses one level into the signaller, bounded by the per-engine deadlock
 * counter cleared each hangcheck pass.
 */
static int semaphore_passed(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_engine_cs *signaller;
	u32 seqno;

	engine->hangcheck.deadlock++;

	signaller = semaphore_waits_for(engine, &seqno);
	if (signaller == NULL)
		return -1;

	/* Prevent pathological recursion due to driver bugs */
	if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
		return -1;

	if (i915_seqno_passed(signaller->get_seqno(signaller), seqno))
		return 1;

	/* cursory check for an unkickable deadlock */
	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
	    semaphore_passed(signaller) < 0)
		return -1;

	return 0;
}

/* Reset every engine's semaphore deadlock counter before a hangcheck pass. */
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv)
		engine->hangcheck.deadlock = 0;
}

2996
/*
 * Decide whether the render engine's sub-units have made progress since the
 * last hangcheck sample.  Non-render engines have no extra INSTDONE state and
 * are always reported as "stuck" so the caller's head-based verdict stands.
 */
static bool subunits_stuck(struct intel_engine_cs *engine)
{
	u32 instdone[I915_NUM_INSTDONE_REG];
	bool stuck;
	int i;

	if (engine->id != RCS)
		return true;

	i915_get_extra_instdone(engine->i915, instdone);

	/* There might be unstable subunit states even when
	 * actual head is not moving. Filter out the unstable ones by
	 * accumulating the undone -> done transitions and only
	 * consider those as progress.
	 */
	stuck = true;
	for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
		const u32 tmp = instdone[i] | engine->hangcheck.instdone[i];

		if (tmp != engine->hangcheck.instdone[i])
			stuck = false;

		engine->hangcheck.instdone[i] |= tmp;
	}

	return stuck;
}

/*
 * Classify engine progress from ACTHD movement: ACTIVE if the head (or any
 * render sub-unit) advanced since the last sample, HUNG otherwise.
 */
static enum intel_ring_hangcheck_action
head_stuck(struct intel_engine_cs *engine, u64 acthd)
{
	if (acthd != engine->hangcheck.acthd) {

		/* Clear subunit states on head movement */
		memset(engine->hangcheck.instdone, 0,
		       sizeof(engine->hangcheck.instdone));

		return HANGCHECK_ACTIVE;
	}

	if (!subunits_stuck(engine))
		return HANGCHECK_ACTIVE;

	return HANGCHECK_HUNG;
}

/*
 * Determine why an engine appears stuck and, where possible, kick it back
 * to life.  A stuck WAIT_FOR_EVENT or a satisfied-but-stuck semaphore wait
 * is kicked by rewriting the ring CTL register; a legitimate semaphore wait
 * yields HANGCHECK_WAIT; everything else is HANGCHECK_HUNG.
 */
static enum intel_ring_hangcheck_action
ring_stuck(struct intel_engine_cs *engine, u64 acthd)
{
	struct drm_i915_private *dev_priv = engine->i915;
	enum intel_ring_hangcheck_action ha;
	u32 tmp;

	ha = head_stuck(engine, acthd);
	if (ha != HANGCHECK_HUNG)
		return ha;

	if (IS_GEN2(dev_priv))
		return HANGCHECK_HUNG;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
	tmp = I915_READ_CTL(engine);
	if (tmp & RING_WAIT) {
		i915_handle_error(dev_priv, 0,
				  "Kicking stuck wait on %s",
				  engine->name);
		/* Writing CTL back pokes RB_WAIT and breaks the hang */
		I915_WRITE_CTL(engine, tmp);
		return HANGCHECK_KICK;
	}

	if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(engine)) {
		default:
			return HANGCHECK_HUNG;
		case 1:
			i915_handle_error(dev_priv, 0,
					  "Kicking stuck semaphore on %s",
					  engine->name);
			I915_WRITE_CTL(engine, tmp);
			return HANGCHECK_KICK;
		case 0:
			return HANGCHECK_WAIT;
		}
	}

	return HANGCHECK_HUNG;
}

3089 3090
/*
 * Safeguard against a lost user interrupt: if no new user interrupts were
 * seen since the last hangcheck sample, wake all waiters on the engine's
 * irq queue (once per engine, tracked via missed_irq_rings) and log it.
 * Returns the current user-interrupt count for the caller to record.
 */
static unsigned kick_waiters(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	unsigned user_interrupts = READ_ONCE(engine->user_interrupts);

	if (engine->hangcheck.user_interrupts == user_interrupts &&
	    !test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
		/* test_irq_rings marks engines with deliberately faked irqs */
		if (!(i915->gpu_error.test_irq_rings & intel_engine_flag(engine)))
			DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
				  engine->name);
		else
			DRM_INFO("Fake missed irq on %s\n",
				 engine->name);
		wake_up_all(&engine->irq_queue);
	}

	return user_interrupts;
}
3107
/*
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track per ring seqno progress and
 * if there are no progress, hangcheck score for that ring is increased.
 * Further, acthd is inspected to see if the ring is stuck. On stuck case
 * we kick the ring. If we see no progress on three subsequent calls
 * we assume chip is wedged and try to fix it by resetting the chip.
 */
static void i915_hangcheck_elapsed(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     gpu_error.hangcheck_work.work);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int busy_count = 0, rings_hung = 0;
	bool stuck[I915_NUM_ENGINES] = { 0 };
/* Score increments per state; a ring is declared hung once its score
 * reaches HANGCHECK_SCORE_RING_HUNG. */
#define BUSY 1
#define KICK 5
#define HUNG 20
#define ACTIVE_DECAY 15

	if (!i915.enable_hangcheck)
		return;

	/*
	 * The hangcheck work is synced during runtime suspend, we don't
	 * require a wakeref. TODO: instead of disabling the asserts make
	 * sure that we hold a reference when this work is running.
	 */
	DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);

	/* As enabling the GPU requires fairly extensive mmio access,
	 * periodically arm the mmio checker to see if we are triggering
	 * any invalid access.
	 */
	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);

	for_each_engine_id(engine, dev_priv, id) {
		u64 acthd;
		u32 seqno;
		unsigned user_interrupts;
		bool busy = true;

		semaphore_clear_deadlocks(dev_priv);

		/* We don't strictly need an irq-barrier here, as we are not
		 * serving an interrupt request, be paranoid in case the
		 * barrier has side-effects (such as preventing a broken
		 * cacheline snoop) and so be sure that we can see the seqno
		 * advance. If the seqno should stick, due to a stale
		 * cacheline, we would erroneously declare the GPU hung.
		 */
		if (engine->irq_seqno_barrier)
			engine->irq_seqno_barrier(engine);

		acthd = intel_ring_get_active_head(engine);
		seqno = engine->get_seqno(engine);

		/* Reset stuck interrupts between batch advances */
		user_interrupts = 0;

		if (engine->hangcheck.seqno == seqno) {
			if (ring_idle(engine, seqno)) {
				engine->hangcheck.action = HANGCHECK_IDLE;
				if (waitqueue_active(&engine->irq_queue)) {
					/* Safeguard against driver failure */
					user_interrupts = kick_waiters(engine);
					engine->hangcheck.score += BUSY;
				} else
					busy = false;
			} else {
				/* We always increment the hangcheck score
				 * if the ring is busy and still processing
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring, if this
				 * ring is in a legitimate wait for another
				 * ring. In that case the waiting ring is a
				 * victim and we want to be sure we catch the
				 * right culprit. Then every time we do kick
				 * the ring, add a small increment to the
				 * score so that we can catch a batch that is
				 * being repeatedly kicked and so responsible
				 * for stalling the machine.
				 */
				engine->hangcheck.action = ring_stuck(engine,
								      acthd);

				switch (engine->hangcheck.action) {
				case HANGCHECK_IDLE:
				case HANGCHECK_WAIT:
					break;
				case HANGCHECK_ACTIVE:
					engine->hangcheck.score += BUSY;
					break;
				case HANGCHECK_KICK:
					engine->hangcheck.score += KICK;
					break;
				case HANGCHECK_HUNG:
					engine->hangcheck.score += HUNG;
					stuck[id] = true;
					break;
				}
			}
		} else {
			engine->hangcheck.action = HANGCHECK_ACTIVE;

			/* Gradually reduce the count so that we catch DoS
			 * attempts across multiple batches.
			 */
			if (engine->hangcheck.score > 0)
				engine->hangcheck.score -= ACTIVE_DECAY;
			if (engine->hangcheck.score < 0)
				engine->hangcheck.score = 0;

			/* Clear head and subunit states on seqno movement */
			acthd = 0;

			memset(engine->hangcheck.instdone, 0,
			       sizeof(engine->hangcheck.instdone));
		}

		engine->hangcheck.seqno = seqno;
		engine->hangcheck.acthd = acthd;
		engine->hangcheck.user_interrupts = user_interrupts;
		busy_count += busy;
	}

	/* Second pass: collect the engines whose score crossed the threshold */
	for_each_engine_id(engine, dev_priv, id) {
		if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
			DRM_INFO("%s on %s\n",
				 stuck[id] ? "stuck" : "no progress",
				 engine->name);
			rings_hung |= intel_engine_flag(engine);
		}
	}

	if (rings_hung) {
		i915_handle_error(dev_priv, rings_hung, "Engine(s) hung");
		goto out;
	}

	if (busy_count)
		/* Reset timer case chip hangs without another request
		 * being added */
		i915_queue_hangcheck(dev_priv);

out:
	ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
}

3260
void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
3261
{
3262
	struct i915_gpu_error *e = &dev_priv->gpu_error;
3263

3264
	if (!i915.enable_hangcheck)
3265 3266
		return;

3267 3268 3269 3270 3271 3272 3273
	/* Don't continually defer the hangcheck so that it is always run at
	 * least once after work has been scheduled on any ring. Otherwise,
	 * we will ignore a hung ring if a second ring is kept busy.
	 */

	queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
			   round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
B
Ben Gamari 已提交
3274 3275
}

3276
/* Reset the south display (PCH/SDE) interrupt registers. */
static void ibx_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* No PCH means no SDE registers to touch */
	if (HAS_PCH_NOP(dev))
		return;

	GEN5_IRQ_RESET(SDE);

	/* CPT/LPT additionally latch south errors in SERR_INT */
	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
		I915_WRITE(SERR_INT, 0xffffffff);
}
3288

P
Paulo Zanoni 已提交
3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300 3301 3302 3303 3304
/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * This function needs to be called before interrupts are enabled.
 */
static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	/* The reset path is expected to have cleared SDEIER already */
	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}

3309
/* Reset the GT (render/media) interrupt registers, plus PM irqs on gen6+. */
static void gen5_gt_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	GEN5_IRQ_RESET(GT);
	if (INTEL_INFO(dev)->gen >= 6)
		GEN5_IRQ_RESET(GEN6_PM);
}

3318 3319 3320 3321
/*
 * Reset all VLV/CHV display interrupt state: GT/display invalidation status,
 * hotplug, per-pipe status bits and the main VLV display IIR/IMR/IER.
 * Caller holds dev_priv->irq_lock (callers take it around this function).
 */
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	if (IS_CHERRYVIEW(dev_priv))
		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);

	/* Disable all hotplug detection and clear latched hotplug status */
	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	/* Clear sticky per-pipe status bits and forget our enable mask */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(PIPESTAT(pipe),
			   PIPE_FIFO_UNDERRUN_STATUS |
			   PIPESTAT_INT_STATUS_MASK);
		dev_priv->pipestat_irq_mask[pipe] = 0;
	}

	GEN5_IRQ_RESET(VLV_);
	/* ~0 marks "everything masked"; postinstall WARNs if this isn't so */
	dev_priv->irq_mask = ~0;
}

3341 3342 3343
/*
 * Enable the VLV/CHV display interrupts: pipestat sources per pipe, GMBUS
 * on pipe A, and the top-level display port/pipe event interrupts.
 * Expects a prior vlv_display_irq_reset() (irq_mask must still be ~0).
 */
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 enable_mask;
	enum pipe pipe;

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
	if (IS_CHERRYVIEW(dev_priv))
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;

	/* Catch enable-without-reset ordering bugs */
	WARN_ON(dev_priv->irq_mask != ~0);

	dev_priv->irq_mask = ~enable_mask;

	GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
}

/* drm_dma.h hooks
*/
/* Reset all ILK-era interrupt registers: display engine, GT, and PCH. */
static void ironlake_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(HWSTAM, 0xffffffff);

	GEN5_IRQ_RESET(DE);
	/* IVB/HSW latch display errors in a separate register */
	if (IS_GEN7(dev))
		I915_WRITE(GEN7_ERR_INT, 0xffffffff);

	gen5_gt_irq_reset(dev);

	ibx_irq_reset(dev);
}

J
Jesse Barnes 已提交
3384 3385
/* Disable all VLV interrupts before installing the irq handler. */
static void valleyview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Master disable first so nothing fires while we reset */
	I915_WRITE(VLV_MASTER_IER, 0);
	POSTING_READ(VLV_MASTER_IER);

	gen5_gt_irq_reset(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

3399 3400 3401 3402 3403 3404 3405 3406
/* Reset all four gen8 GT interrupt register banks. */
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	GEN8_IRQ_RESET_NDX(GT, 0);
	GEN8_IRQ_RESET_NDX(GT, 1);
	GEN8_IRQ_RESET_NDX(GT, 2);
	GEN8_IRQ_RESET_NDX(GT, 3);
}

P
Paulo Zanoni 已提交
3407
/* Full gen8 interrupt reset: master, GT, per-pipe DE, ports, misc, PCU, PCH. */
static void gen8_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	/* Only touch pipe registers whose power well is currently on */
	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);

	GEN5_IRQ_RESET(GEN8_DE_PORT_);
	GEN5_IRQ_RESET(GEN8_DE_MISC_);
	GEN5_IRQ_RESET(GEN8_PCU_);

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_reset(dev);
}
3429

3430 3431
/*
 * Re-program the per-pipe DE interrupt registers for the pipes in
 * @pipe_mask after their power well was turned on (register contents are
 * lost while the well is off).
 */
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     unsigned int pipe_mask)
{
	/* vblank and underrun are enabled in IER but masked via de_irq_mask */
	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);
	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
				  dev_priv->de_irq_mask[pipe],
				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
	spin_unlock_irq(&dev_priv->irq_lock);
}

3444 3445 3446
/*
 * Quiesce the per-pipe DE interrupts for the pipes in @pipe_mask before
 * their power well is switched off.
 */
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
				     unsigned int pipe_mask)
{
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);
	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->dev->irq);
}

3458 3459 3460 3461 3462 3463 3464
/* Disable all CHV interrupts before installing the irq handler. */
static void cherryview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

3475
static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
3476 3477 3478 3479 3480
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;

3481
	for_each_intel_encoder(dev_priv->dev, encoder)
3482 3483 3484 3485 3486 3487
		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}

3488
/* Configure south-bridge (IBX/CPT/LPT) hotplug detection. */
static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, hotplug, enabled_irqs;

	if (HAS_PCH_IBX(dev_priv)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on LPT+.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	/*
	 * When CPU and PCH are on the same package, port A
	 * HPD must be enabled in both north and south.
	 */
	if (HAS_PCH_LPT_LP(dev_priv))
		hotplug |= PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
X
Xiong Zhang 已提交
3520

3521
/* Configure Sunrise Point PCH hotplug detection (ports A-E). */
static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, hotplug, enabled_irqs;

	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/* Enable digital hotplug on the PCH */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
		PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);

	/* Port E lives in a second hotplug control register on SPT */
	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
	hotplug |= PORTE_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}

3541
/*
 * Configure CPU-side (north) digital port A hotplug for ILK/IVB/BDW-class
 * hardware, then hand off to ibx_hpd_irq_setup() for the PCH ports.
 */
static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, hotplug, enabled_irqs;

	if (INTEL_GEN(dev_priv) >= 8) {
		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);

		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else if (INTEL_GEN(dev_priv) >= 7) {
		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else {
		hotplug_irqs = DE_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	}

	/*
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 * The pulse duration bits are reserved on HSW+.
	 */
	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);

	ibx_hpd_irq_setup(dev_priv);
}

3575
/*
 * Configure Broxton hotplug detection for DDI A-C, including the per-port
 * HPD polarity-invert bits which depend on the board (AOB) design as
 * described by the VBT.
 */
static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, hotplug, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;

	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
		PORTA_HOTPLUG_ENABLE;

	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
		      hotplug, enabled_irqs);
	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;

	/*
	 * For BXT invert bit has to be set based on AOB design
	 * for HPD detection logic, update it based on VBT fields.
	 */

	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
		hotplug |= BXT_DDIA_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
		hotplug |= BXT_DDIB_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
		hotplug |= BXT_DDIC_HPD_INVERT;

	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

P
Paulo Zanoni 已提交
3610 3611
/* Unmask the always-wanted PCH interrupts (GMBUS, AUX, poison). */
static void ibx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 mask;

	if (HAS_PCH_NOP(dev))
		return;

	if (HAS_PCH_IBX(dev))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;

	/* Reset should have left no interrupts pending */
	gen5_assert_iir_is_zero(dev_priv, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);
}

3627 3628 3629 3630 3631 3632 3633 3634
/* Enable the GT user/parity interrupts and (gen6+) the PM interrupt bank. */
static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
		gt_irqs |= GT_PARITY_ERROR(dev);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev)) {
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
			   ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);

	if (INTEL_INFO(dev)->gen >= 6) {
		/*
		 * RPS interrupts will get enabled/disabled on demand when RPS
		 * itself is enabled/disabled.
		 */
		if (HAS_VEBOX(dev))
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;

		dev_priv->pm_irq_mask = 0xffffffff;
		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
	}
}

3664
/*
 * Enable ILK/SNB/IVB/HSW display-engine interrupts, then GT and PCH irqs.
 * display_mask feeds both IMR and IER; extra_mask bits are enabled in IER
 * only (they stay masked until explicitly unmasked, e.g. vblank on demand).
 * Always returns 0.
 */
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 display_mask, extra_mask;

	if (INTEL_INFO(dev)->gen >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
				DE_PLANEB_FLIP_DONE_IVB |
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A |
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
				DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_DP_A_HOTPLUG);
	}

	dev_priv->irq_mask = ~display_mask;

	I915_WRITE(HWSTAM, 0xeffe);

	/* SDEIER must be set up before interrupts are enabled */
	ibx_irq_pre_postinstall(dev);

	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);

	gen5_gt_irq_postinstall(dev);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irq(&dev_priv->irq_lock);
		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}

	return 0;
}

3714 3715 3716 3717 3718 3719 3720 3721 3722
/*
 * Turn VLV/CHV display interrupts on.  Caller must hold irq_lock.
 * If driver interrupts are already installed, (re)program the display
 * interrupt registers immediately; otherwise only record the state and
 * let the postinstall path program them later.
 */
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv)) {
		vlv_display_irq_reset(dev_priv);
		vlv_display_irq_postinstall(dev_priv);
	}
}

/*
 * Turn VLV/CHV display interrupts off.  Caller must hold irq_lock.
 * Resets the display interrupt registers if driver interrupts are live.
 */
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		vlv_display_irq_reset(dev_priv);
}

3742 3743 3744 3745 3746

/* Enable VLV GT and (if requested) display interrupts, then the master IER. */
static int valleyview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	gen5_gt_irq_postinstall(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Master enable last, after all sources are configured */
	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(VLV_MASTER_IER);

	return 0;
}

3760 3761 3762 3763 3764
/* Enable the gen8 GT interrupts across the four GT register banks. */
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		/* bank 0: RCS + BCS */
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		/* bank 1: VCS1 + VCS2 */
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		/* bank 2: PM, programmed separately below */
		0,
		/* bank 3: VECS */
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
		};

	if (HAS_L3_DPF(dev_priv))
		gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	dev_priv->pm_irq_mask = 0xffffffff;
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled.
	 */
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

/*
 * Enable the gen8/gen9 display-engine interrupts: per-pipe sources (for
 * pipes whose power well is on) and the DE port sources (AUX, hotplug,
 * GMBUS on BXT).
 */
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	uint32_t de_pipe_enables;
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
	u32 de_port_enables;
	enum pipe pipe;

	if (INTEL_INFO(dev_priv)->gen >= 9) {
		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
				  GEN9_AUX_CHANNEL_D;
		if (IS_BROXTON(dev_priv))
			de_port_masked |= BXT_DE_PORT_GMBUS;
	} else {
		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	}

	/* vblank/underrun live in IER but stay masked until needed */
	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

	de_port_enables = de_port_masked;
	if (IS_BROXTON(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;

	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);

	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
}

/*
 * Full gen8+ interrupt postinstall: GT and DE interrupt setup, PCH
 * (south display) setup where present, then unmask the master IRQ.
 * The master IRQ write must come last so no interrupt fires before
 * the per-domain registers are programmed.
 */
static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_postinstall(dev);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ); /* flush before interrupts can arrive */

	return 0;
}

3853 3854 3855 3856 3857 3858
/*
 * CHV postinstall: gen8-style GT interrupts plus VLV-style display
 * interrupts (the latter only if display IRQs are currently enabled,
 * checked under irq_lock), then unmask the gen8 master IRQ.
 */
static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	gen8_gt_irq_postinstall(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

3870 3871 3872 3873 3874 3875 3876
/* Tear down gen8+ interrupts by resetting all interrupt registers. */
static void gen8_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	gen8_irq_reset(dev);
}

J
Jesse Barnes 已提交
3880 3881
/*
 * VLV interrupt teardown: mask the master IER first so nothing new
 * fires, reset GT interrupts, quiesce HWSTAM, then reset the display
 * interrupt state under irq_lock (only if display IRQs were enabled).
 */
static void valleyview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(VLV_MASTER_IER, 0);
	POSTING_READ(VLV_MASTER_IER);

	gen5_gt_irq_reset(dev);

	I915_WRITE(HWSTAM, 0xffffffff);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

3900 3901 3902 3903 3904 3905 3906 3907 3908 3909
/*
 * CHV interrupt teardown: disable the gen8 master IRQ first, reset the
 * gen8 GT and PCU interrupt registers, then reset VLV-style display
 * interrupts under irq_lock.
 */
static void cherryview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

3920
/* Tear down ILK-era interrupts by resetting all interrupt registers. */
static void ironlake_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	ironlake_irq_reset(dev);
}

3930
/*
 * Gen2 preinstall: clear per-pipe status, mask everything in IMR and
 * disable everything in IER before the IRQ line is hooked up.
 * Note gen2 uses the 16-bit variants of IMR/IER.
 */
static void i8xx_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

/*
 * Gen2 postinstall: program the error mask, unmask the always-wanted
 * display/user interrupts in the 16-bit IMR/IER, and enable the
 * per-pipe CRC-done pipestat events.
 */
static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

3973
/*
 * Gen2 top-half interrupt handler. Reads the 16-bit IIR, drains pending
 * pipestat events under irq_lock, acks IIR (excluding the flip-pending
 * bits tracked via flip_mask), and dispatches user-interrupt, vblank,
 * CRC and FIFO-underrun events. Loops until IIR (minus flip_mask) is
 * clear. Runtime-PM wakeref asserts are disabled for the duration since
 * IRQs are synced during runtime suspend.
 */
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	irqreturn_t ret;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	ret = IRQ_NONE;
	iir = I915_READ16(IIR);
	if (iir == 0)
		goto out;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff)
				I915_WRITE(reg, pipe_stats[pipe]);
		}
		spin_unlock(&dev_priv->irq_lock);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[RCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				intel_pipe_handle_vblank(dev_priv, pipe);

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev_priv, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		iir = new_iir;
	}
	ret = IRQ_HANDLED;

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

/*
 * Gen2 teardown: disable and clear per-pipe status, mask/disable all
 * interrupts, and ack anything still pending in IIR.
 */
static void i8xx_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

4061 4062
/*
 * Gen3 preinstall: quiesce hotplug (where supported), HWSTAM and
 * per-pipe status, then mask/disable everything before the IRQ line
 * is hooked up.
 */
static void i915_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

/*
 * Gen3 postinstall: program the error mask, unmask the always-wanted
 * display/ASLE/user interrupts, add the display-port interrupt when the
 * platform has hotplug, and enable ASLE + CRC pipestat events.
 */
static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev_priv);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

4126
/*
 * Gen3 top-half interrupt handler. Mirrors the gen2 handler but uses
 * the 32-bit IIR, additionally handles hotplug (where the platform has
 * it) and ASLE/backlight events, and keeps looping while IIR (minus the
 * flip-pending bits in flip_mask) stays non-zero.
 */
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
			if (hotplug_status)
				i9xx_hpd_irq_handler(dev_priv, hotplug_status);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[RCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				intel_pipe_handle_vblank(dev_priv, pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev_priv, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev_priv);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

/*
 * Gen3 teardown: quiesce hotplug (where supported), disable and clear
 * per-pipe status, mask/disable all interrupts, and ack pending IIR.
 */
static void i915_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

/*
 * Gen4 preinstall: quiesce hotplug, HWSTAM and per-pipe status, then
 * mask/disable everything before the IRQ line is hooked up.
 */
static void i965_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

/*
 * Gen4 postinstall: build IMR/IER masks (BSD ring interrupt added on
 * G4X), enable GMBUS and CRC pipestat events, program the error mask
 * (G4X has extra error bits), then write IMR/IER and reset hotplug.
 */
static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	/* Flip-pending bits stay unmasked in IMR but are not enabled in IER. */
	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev_priv);

	return 0;
}

4323
/*
 * Program PORT_HOTPLUG_EN for gen3/4 platforms. Must be called with
 * irq_lock held (asserted below). Only the hotplug-enable, CRT voltage
 * compare and CRT activation-period fields are updated.
 */
static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/* Programming the CRT detection parameters tends
	   to generate a spurious hotplug event about three
	   seconds later.  So just do it once.
	*/
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}

4348
/*
 * Gen4 top-half interrupt handler. Like the gen3 handler, but also
 * notifies the BSD (video) ring, uses the start-of-vblank pipestat bit,
 * and dispatches GMBUS events signalled through pipe A's pipestat.
 */
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
			if (hotplug_status)
				i9xx_hpd_irq_handler(dev_priv, hotplug_status);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
				intel_pipe_handle_vblank(dev_priv, pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev_priv, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev_priv);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev_priv);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

/*
 * Gen4 teardown: quiesce hotplug and HWSTAM, mask/disable everything,
 * then ack any still-pending pipestat bits and IIR.
 */
static void i965_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

4480 4481 4482 4483 4484 4485 4486
/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not setup the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
			  i915_hangcheck_elapsed);

	/* Pick the vblank counter implementation and counter width. */
	if (IS_GEN2(dev_priv)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	/* Wire up the per-platform irq/vblank/hotplug vtables. */
	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (IS_BROXTON(dev))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (HAS_PCH_SPT(dev))
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else {
		/* Legacy (pre-ILK) platforms: gen2, gen3 and gen4. */
		if (IS_GEN2(dev_priv)) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (IS_GEN3(dev_priv)) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
4588

4589 4590 4591 4592 4593 4594 4595 4596 4597 4598 4599
/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
}

4612 4613 4614 4615 4616 4617 4618
/**
 * intel_irq_uninstall - finilizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(dev_priv->dev);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}

4626 4627 4628 4629 4630 4631 4632
/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
	dev_priv->pm.irqs_enabled = false;
	/* Wait for any in-flight handler to finish before returning. */
	synchronize_irq(dev_priv->dev->irq);
}

4640 4641 4642 4643 4644 4645 4646
/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	/* Mark enabled before programming, matching intel_irq_install(). */
	dev_priv->pm.irqs_enabled = true;
	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
}