/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid that read-modify-write cycles
 * interfere, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    uint32_t interrupt_mask,
			    uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
	POSTING_READ_FW(GTIMR);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

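/*
 * Note: on gen8+ the PM interrupt bits live in the second GT interrupt
 * register bank (GEN8_GT_IIR(2) and friends), while earlier gens have
 * dedicated GEN6_PM* registers. The helpers below pick the right
 * register for the running generation so callers stay generic.
 */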
static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}

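/*
 * Like the GEN{5,8}_IRQ_RESET macros above, the IIR is cleared twice
 * here since it can latch a second pending event; a single write is
 * not guaranteed to leave it empty.
 */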
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	if (READ_ONCE(dev_priv->rps.interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON_ONCE(dev_priv->rps.pm_iir);
	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
				dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	return (mask & ~dev_priv->rps.pm_intr_keep);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	if (!READ_ONCE(dev_priv->rps.interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
				~dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	/* Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&dev_priv->rps.work);
	gen6_reset_rps_interrupts(dev_priv);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

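/*
 * Low-level PIPESTAT helpers. The upper 16 bits of PIPESTAT hold the
 * interrupt enable bits and the lower 16 bits the corresponding
 * (write-1-to-clear) status bits, hence the "enable_mask =
 * status_mask << 16" convention used by the callers further down.
 */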
static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		        u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

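/*
 * On VLV/CHV the enable-bit layout does not follow the simple
 * "status << 16" rule for every bit, so the enable mask is computed by
 * vlv_get_pipestat_enable_mask() above instead.
 */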
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode = &crtc->base.hwmode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev_priv))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank.  So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem.  We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
				DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime,
				    const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
			      int *max_error,
			      struct timeval *vblank_time,
			      unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	if (!crtc->hwmode.crtc_clock) {
		DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     &crtc->hwmode);
}

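/*
 * Note the inverted sense of the ILK/IPS delay values below: a
 * numerically smaller delay means a higher frequency, so max_delay is
 * the lower bound on new_delay and min_delay the upper bound.
 */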
static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev_priv, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}
static void notify_ring(struct intel_engine_cs *engine)
{
	smp_store_mb(engine->breadcrumbs.irq_posted, true);
	if (intel_engine_wakeup(engine))
		trace_i915_gem_request_notify(engine);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

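/*
 * Compare the accumulated render + media C0 residency against the
 * given percentage threshold of the elapsed CZ timestamp interval:
 * returns true if the GPU was busier than the threshold across the
 * measured window.
 */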
static bool vlv_c0_above(struct drm_i915_private *dev_priv,
			 const struct intel_rps_ei *old,
			 const struct intel_rps_ei *now,
			 int threshold)
{
	u64 time, c0;
	unsigned int mul = 100;

	if (old->cz_clock == 0)
		return false;

	if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
		mul <<= 8;

	time = now->cz_clock - old->cz_clock;
	time *= threshold * dev_priv->czclk_freq;

	/* Workload can be split between render + media, e.g. SwapBuffers
	 * being blitted in X after being rendered in mesa. To account for
	 * this we need to combine both engines into our activity counter.
	 */
	c0 = now->render_c0 - old->render_c0;
	c0 += now->media_c0 - old->media_c0;
	c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;

	return c0 >= time;
}

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);
	if (now.cz_clock == 0)
		return 0;

	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
		if (!vlv_c0_above(dev_priv,
				  &dev_priv->rps.down_ei, &now,
				  dev_priv->rps.down_threshold))
			events |= GEN6_PM_RP_DOWN_THRESHOLD;
		dev_priv->rps.down_ei = now;
	}

	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		if (vlv_c0_above(dev_priv,
				 &dev_priv->rps.up_ei, &now,
				 dev_priv->rps.up_threshold))
			events |= GEN6_PM_RP_UP_THRESHOLD;
		dev_priv->rps.up_ei = now;
	}

	return events;
}

static bool any_waiters(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv)
		if (intel_engine_has_waiter(engine))
			return true;

	return false;
}

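/*
 * Bottom half for the RPS threshold interrupts: recompute the target
 * frequency from the latched PM IIR bits and any client boost
 * requests, then program it under rps.hw_lock.
 */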
static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	bool client_boost;
	int new_delay, adj, min, max;
	u32 pm_iir;

	spin_lock_irq(&dev_priv->irq_lock);
	/* Speed up work cancellation during disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	client_boost = dev_priv->rps.client_boost;
	dev_priv->rps.client_boost = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = dev_priv->rps.last_adj;
	new_delay = dev_priv->rps.cur_freq;
	min = dev_priv->rps.min_freq_softlimit;
	max = dev_priv->rps.max_freq_softlimit;
	if (client_boost || any_waiters(dev_priv))
		max = dev_priv->rps.max_freq;
	if (client_boost && new_delay < dev_priv->rps.boost_freq) {
		new_delay = dev_priv->rps.boost_freq;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq - adj) {
			new_delay = dev_priv->rps.efficient_freq;
			adj = 0;
		}
	} else if (client_boost || any_waiters(dev_priv)) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
	} else { /* unknown event */
		adj = 0;
	}

	dev_priv->rps.last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	intel_set_rps(dev_priv, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
					       u32 iir)
{
	if (!HAS_L3_DPF(dev_priv))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev_priv);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[VCS]);
}

static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev_priv))
		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
}

static __always_inline void
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
{
	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
		notify_ring(engine);
	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
		tasklet_schedule(&engine->irq_tasklet);
}

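/*
 * gen8 GT interrupts are acked (and the IIRs cleared) in
 * gen8_gt_irq_ack() under the primary interrupt handler, while the
 * actual processing is deferred to gen8_gt_irq_handler() below.
 */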
static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
				   u32 master_ctl,
				   u32 gt_iir[4])
{
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
		if (gt_iir[0]) {
			I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
		if (gt_iir[1]) {
			I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
		if (gt_iir[3]) {
			I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
		if (gt_iir[2] & dev_priv->pm_rps_events) {
			I915_WRITE_FW(GEN8_GT_IIR(2),
				      gt_iir[2] & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	return ret;
}

static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
				u32 gt_iir[4])
{
	if (gt_iir[0]) {
		gen8_cs_irq_handler(&dev_priv->engine[RCS],
				    gt_iir[0], GEN8_RCS_IRQ_SHIFT);
		gen8_cs_irq_handler(&dev_priv->engine[BCS],
				    gt_iir[0], GEN8_BCS_IRQ_SHIFT);
	}

	if (gt_iir[1]) {
		gen8_cs_irq_handler(&dev_priv->engine[VCS],
				    gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
		gen8_cs_irq_handler(&dev_priv->engine[VCS2],
				    gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
	}

	if (gt_iir[3])
		gen8_cs_irq_handler(&dev_priv->engine[VECS],
				    gt_iir[3], GEN8_VECS_IRQ_SHIFT);

	if (gt_iir[2] & dev_priv->pm_rps_events)
		gen6_rps_irq_handler(dev_priv, gt_iir[2]);
}

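/*
 * The *_port_hotplug_long_detect() helpers below decode, per platform,
 * whether the pulse latched for a given port was a long pulse
 * (plug/unplug) rather than a short one (e.g. a sink-initiated IRQ).
 */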
static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
			     u32 hotplug_trigger, u32 dig_hotplug_reg,
			     const u32 hpd[HPD_NUM_PINS],
			     bool long_pulse_detect(enum port port, u32 val))
{
	enum port port;
	int i;

	for_each_hpd_pin(i) {
		if ((hpd[i] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(i);

		if (!intel_hpd_pin_to_port(i, &port))
			continue;

		if (long_pulse_detect(port, dig_hotplug_reg))
			*long_mask |= BIT(i);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask);

}

static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
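/*
 * CRC results are pushed into a per-pipe circular buffer which the
 * debugfs interface drains; on overflow the new entry is dropped with
 * an error.
 */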
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_DEBUG_KMS("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm,
								 pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif


static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	uint32_t res1, res2;

	if (INTEL_GEN(dev_priv) >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			schedule_work(&dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	bool ret;

	ret = drm_handle_vblank(&dev_priv->drm, pipe);
	if (ret)
		intel_finish_page_flip_mmio(dev_priv, pipe);

	return ret;
}

static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
					u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	int pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);
}

static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev_priv, pipe))
			intel_check_page_flip(dev_priv, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
			intel_finish_page_flip_cs(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (hotplug_status)
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);

	return hotplug_status;
}

static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev_priv);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);
		iir = I915_READ(VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		I915_WRITE(VLV_MASTER_IER, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		I915_WRITE(VLV_IER, ier);
		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
		POSTING_READ(VLV_MASTER_IER);

		if (gt_iir)
			snb_gt_irq_handler(dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}
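
/*
 * Minimal sketch of the 0->1 edge trick described in the handler above,
 * assuming a hypothetical device with one master-enable register and one
 * IIR; it is not part of i915 and only shows the ordering. Dropping the
 * master enable first forces the combined status to 0, so restoring it
 * after the acks yields a fresh edge for any source left pending.
 */
static irqreturn_t __maybe_unused edge_retrigger_sketch(void __iomem *master_ier,
							void __iomem *iir_reg)
{
	u32 iir;

	writel(0, master_ier);		/* 1) force the edge condition low */
	iir = readl(iir_reg);		/* 2) latch the pending sources */
	if (iir)
		writel(iir, iir_reg);	/* 3) ack what was latched */

	/* 4) process the latched 'iir' copy here */

	writel(1, master_ier);		/* 5) re-enable: leftover IIR bits */
	readl(master_ier);		/*    now raise a fresh 0->1 edge */

	return iir ? IRQ_HANDLED : IRQ_NONE;
}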

static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 master_ctl, iir;
		u32 gt_iir[4] = {};
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		I915_WRITE(GEN8_MASTER_IRQ, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		I915_WRITE(VLV_IER, ier);
		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);

		gen8_gt_irq_handler(dev_priv, gt_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/*
	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
	 * unless we touch the hotplug register, even if hotplug_trigger is
	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
	 * errors.
	 */
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	if (!hotplug_trigger) {
		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
			PORTD_HOTPLUG_STATUS_MASK |
			PORTC_HOTPLUG_STATUS_MASK |
			PORTB_HOTPLUG_STATUS_MASK;
		dig_hotplug_reg &= ~mask;
	}

	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
	if (!hotplug_trigger)
		return;

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   pch_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
}

static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev_priv))
				ivb_pipe_crc_irq_handler(dev_priv, pipe);
			else
				hsw_pipe_crc_irq_handler(dev_priv, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev_priv);
}

static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
	u32 pin_mask = 0, long_mask = 0;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug_long_detect);
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug2_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);
}

static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   ilk_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev_priv);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe) &&
		    intel_pipe_handle_vblank(dev_priv, pipe))
			intel_check_page_flip(dev_priv, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE(pipe))
			intel_finish_page_flip_cs(dev_priv, pipe);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev_priv))
			cpt_irq_handler(dev_priv, pch_iir);
		else
			ibx_irq_handler(dev_priv, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev_priv);
}

static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev_priv);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev_priv);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
		    intel_pipe_handle_vblank(dev_priv, pipe))
			intel_check_page_flip(dev_priv, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
			intel_finish_page_flip_cs(dev_priv, pipe);
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev_priv, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev_priv)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
		if (INTEL_GEN(dev_priv) >= 6)
			snb_gt_irq_handler(dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev_priv, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
		if (INTEL_GEN(dev_priv) >= 7)
			ivb_display_irq_handler(dev_priv, de_iir);
		else
			ilk_display_irq_handler(dev_priv, de_iir);
	}

	if (INTEL_GEN(dev_priv) >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, pm_iir);
		}
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev_priv)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}
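
/*
 * Minimal sketch of the SDEIER trick above, assuming a hypothetical
 * primary/secondary IIR pair (not i915 registers): with the secondary's
 * IER forced to zero, its IIR can be sampled and written exactly once;
 * restoring IER last re-raises the interrupt if the secondary latched
 * anything meanwhile, so nothing is lost to the single IIR write.
 */
static u32 __maybe_unused drain_secondary_once(void __iomem *sec_ier,
					       void __iomem *sec_iir)
{
	u32 ier = readl(sec_ier);
	u32 iir;

	writel(0, sec_ier);		/* park the secondary */
	readl(sec_ier);			/* posting read */

	iir = readl(sec_iir);
	if (iir)
		writel(iir, sec_iir);	/* the one and only IIR write */

	/* process 'iir' here */

	writel(ier, sec_ier);		/* replays anything still pending */
	readl(sec_ier);

	return iir;
}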

static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   bxt_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;
	u32 iir;
	enum pipe pipe;

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = I915_READ(GEN8_DE_MISC_IIR);
		if (iir) {
			I915_WRITE(GEN8_DE_MISC_IIR, iir);
			ret = IRQ_HANDLED;
			if (iir & GEN8_DE_MISC_GSE)
				intel_opregion_asle_intr(dev_priv);
			else
				DRM_ERROR("Unexpected DE Misc interrupt\n");
		} else {
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = I915_READ(GEN8_DE_PORT_IIR);
		if (iir) {
			u32 tmp_mask;
			bool found = false;

			I915_WRITE(GEN8_DE_PORT_IIR, iir);
			ret = IRQ_HANDLED;

			tmp_mask = GEN8_AUX_CHANNEL_A;
			if (INTEL_INFO(dev_priv)->gen >= 9)
				tmp_mask |= GEN9_AUX_CHANNEL_B |
					    GEN9_AUX_CHANNEL_C |
					    GEN9_AUX_CHANNEL_D;

			if (iir & tmp_mask) {
				dp_aux_irq_handler(dev_priv);
				found = true;
			}

			if (IS_BROXTON(dev_priv)) {
				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
				if (tmp_mask) {
					bxt_hpd_irq_handler(dev_priv, tmp_mask,
							    hpd_bxt);
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
				if (tmp_mask) {
					ilk_hpd_irq_handler(dev_priv,
							    tmp_mask, hpd_bdw);
					found = true;
				}
			}

			if (IS_BROXTON(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
				gmbus_irq_handler(dev_priv);
				found = true;
			}

			if (!found)
				DRM_ERROR("Unexpected DE Port interrupt\n");
		} else {
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
		}
	}

	for_each_pipe(dev_priv, pipe) {
		u32 flip_done, fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
			continue;
		}

		ret = IRQ_HANDLED;
		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK &&
		    intel_pipe_handle_vblank(dev_priv, pipe))
			intel_check_page_flip(dev_priv, pipe);

		flip_done = iir;
		if (INTEL_INFO(dev_priv)->gen >= 9)
			flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
		else
			flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;

		if (flip_done)
			intel_finish_page_flip_cs(dev_priv, pipe);

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev_priv, pipe);

		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		fault_errors = iir;
		if (INTEL_INFO(dev_priv)->gen >= 9)
			fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		else
			fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

		if (fault_errors)
			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
				  pipe_name(pipe),
				  fault_errors);
	}

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		iir = I915_READ(SDEIIR);
		if (iir) {
			I915_WRITE(SDEIIR, iir);
			ret = IRQ_HANDLED;

			if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
				spt_irq_handler(dev_priv, iir);
			else
				cpt_irq_handler(dev_priv, iir);
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
		}
	}

	return ret;
}

static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 master_ctl;
	u32 gt_iir[4] = {};
	irqreturn_t ret;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
	if (!master_ctl)
		return IRQ_NONE;

	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	/* Find, clear, then process each source of interrupt */
	ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
	gen8_gt_irq_handler(dev_priv, gt_iir);
	ret |= gen8_de_irq_handler(dev_priv, master_ctl);

	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ_FW(GEN8_MASTER_IRQ);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i915_error_wake_up(struct drm_i915_private *dev_priv)
{
	/*
	 * Notify all waiters for GPU completion events that reset state has
	 * been changed, and that they need to restart their wait after
	 * checking for potential errors (and bail out to drop locks if there is
	 * a gpu reset pending so that i915_error_work_func can acquire them).
	 */

	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
	wake_up_all(&dev_priv->gpu_error.wait_queue);

	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
	wake_up_all(&dev_priv->pending_flip_queue);
}

/**
 * i915_reset_and_wakeup - do process context error handling work
 * @dev_priv: i915 device private
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
{
	struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };

	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);

	DRM_DEBUG_DRIVER("resetting chip\n");
	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);

	/*
	 * In most cases it's guaranteed that we get here with an RPM
	 * reference held, for example because there is a pending GPU
	 * request that won't finish until the reset is done. This
	 * isn't the case at least when we get here by doing a
	 * simulated reset via debugfs, so get an RPM reference.
	 */
	intel_runtime_pm_get(dev_priv);
	intel_prepare_reset(dev_priv);

	do {
		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
		if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
			i915_reset(dev_priv);
			mutex_unlock(&dev_priv->drm.struct_mutex);
		}

		/* We need to wait for anyone holding the lock to wakeup */
	} while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
				     I915_RESET_IN_PROGRESS,
				     TASK_UNINTERRUPTIBLE,
				     HZ));

	intel_finish_reset(dev_priv);
	intel_runtime_pm_put(dev_priv);

	if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		kobject_uevent_env(kobj,
				   KOBJ_CHANGE, reset_done_event);

	/*
	 * Note: The wake_up also serves as a memory barrier so that
	 * waiters see the updated value of the dev_priv->gpu_error.
	 */
	wake_up_all(&dev_priv->gpu_error.reset_queue);
}
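
/*
 * Shape of the reset-retry loop above, reduced to its essentials with a
 * hypothetical flag word and reset callback (not i915 state): retry
 * under a trylock until the in-progress bit is observed clear, sleeping
 * up to a second per attempt so current lock holders can wake up and
 * back off.
 */
static void __maybe_unused retry_until_bit_clears(unsigned long *flags, int bit,
						  struct mutex *lock,
						  void (*do_reset)(void))
{
	do {
		if (mutex_trylock(lock)) {
			do_reset();	/* expected to clear 'bit' on success */
			mutex_unlock(lock);
		}
		/* non-zero while 'bit' is still set after HZ jiffies */
	} while (wait_on_bit_timeout(flags, bit, TASK_UNINTERRUPTIBLE, HZ));
}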

static inline void
i915_err_print_instdone(struct drm_i915_private *dev_priv,
			struct intel_instdone *instdone)
{
	int slice;
	int subslice;

	pr_err("  INSTDONE: 0x%08x\n", instdone->instdone);

	if (INTEL_GEN(dev_priv) <= 3)
		return;

	pr_err("  SC_INSTDONE: 0x%08x\n", instdone->slice_common);

	if (INTEL_GEN(dev_priv) <= 6)
		return;

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		pr_err("  SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
		       slice, subslice, instdone->sampler[slice][subslice]);

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		pr_err("  ROW_INSTDONE[%d][%d]: 0x%08x\n",
		       slice, subslice, instdone->row[slice][subslice]);
}

static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
{
	struct intel_instdone instdone;
	u32 eir = I915_READ(EIR);
	int pipe;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_engine_instdone(dev_priv, RCS, &instdone);

	if (IS_G4X(dev_priv)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			i915_err_print_instdone(dev_priv, &instdone);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev_priv)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(dev_priv, pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		i915_err_print_instdone(dev_priv, &instdone);
		if (INTEL_GEN(dev_priv) < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle a gpu error
 * @dev_priv: i915 device private
 * @engine_mask: mask representing engines that are hung
 * @fmt: Error message format string
 *
 * Do some basic checking of register state at error time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_i915_private *dev_priv,
		       u32 engine_mask,
		       const char *fmt, ...)
{
	va_list args;
	char error_msg[80];

	va_start(args, fmt);
	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
	va_end(args);

	i915_capture_error_state(dev_priv, engine_mask, error_msg);
	i915_report_and_clear_eir(dev_priv);

	if (!engine_mask)
		return;

	if (test_and_set_bit(I915_RESET_IN_PROGRESS,
			     &dev_priv->gpu_error.flags))
		return;

	/*
	 * Wakeup waiting processes so that the reset function
	 * i915_reset_and_wakeup doesn't deadlock trying to grab
	 * various locks. By bumping the reset counter first, the woken
	 * processes will see a reset in progress and back off,
	 * releasing their locks and then wait for the reset completion.
	 * We must do this for _all_ gpu waiters that might hold locks
	 * that the reset work needs to acquire.
	 *
	 * Note: The wake_up also provides a memory barrier to ensure that the
	 * waiters see the updated value of the reset flags.
	 */
	i915_error_wake_up(dev_priv);

	i915_reset_and_wakeup(dev_priv);
}
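
/*
 * Usage sketch (hypothetical caller, not in the driver): reporting a
 * single hung render engine. A non-zero engine_mask captures state and
 * kicks off the reset machinery; a zero mask only logs and captures.
 */
static void __maybe_unused example_report_render_hang(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine = &dev_priv->engine[RCS];

	i915_handle_error(dev_priv, intel_engine_flag(engine),
			  "render ring hung at seqno %x",
			  intel_engine_get_seqno(engine));
}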

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_STATUS);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_STATUS |
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static bool
ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
{
	if (INTEL_GEN(engine->i915) >= 8) {
		return (ipehr >> 23) == 0x1c;
	} else {
		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
				 MI_SEMAPHORE_REGISTER);
	}
}

static struct intel_engine_cs *
semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
				 u64 offset)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_engine_cs *signaller;

	if (INTEL_GEN(dev_priv) >= 8) {
		for_each_engine(signaller, dev_priv) {
			if (engine == signaller)
				continue;

			if (offset == signaller->semaphore.signal_ggtt[engine->hw_id])
				return signaller;
		}
	} else {
		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;

		for_each_engine(signaller, dev_priv) {
			if (engine == signaller)
				continue;

			if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
				return signaller;
		}
	}

	DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x, offset 0x%016llx\n",
			 engine->name, ipehr, offset);

	return ERR_PTR(-ENODEV);
}

static struct intel_engine_cs *
semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
{
	struct drm_i915_private *dev_priv = engine->i915;
	void __iomem *vaddr;
	u32 cmd, ipehr, head;
	u64 offset = 0;
	int i, backwards;

	/*
	 * This function does not support execlist mode - any attempt to
	 * proceed further into this function will result in a kernel panic
	 * when dereferencing ring->buffer, which is not set up in execlist
	 * mode.
	 *
	 * The correct way of doing it would be to derive the currently
	 * executing ring buffer from the current context, which is derived
	 * from the currently running request. Unfortunately, to get the
	 * current request we would have to grab the struct_mutex before doing
	 * anything else, which would be ill-advised since some other thread
	 * might have grabbed it already and managed to hang itself, causing
	 * the hang checker to deadlock.
	 *
	 * Therefore, this function does not support execlist mode in its
	 * current form. Just return NULL and move on.
	 */
	if (engine->buffer == NULL)
		return NULL;

	ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
	if (!ipehr_is_semaphore_wait(engine, ipehr))
		return NULL;

	/*
	 * HEAD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX. But limit it to just 3
	 * or 4 dwords depending on the semaphore wait command size.
	 * Note that we don't care about ACTHD here since that might
	 * point at the batch, and semaphores are always emitted into the
	 * ringbuffer itself.
	 */
	head = I915_READ_HEAD(engine) & HEAD_ADDR;
	backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
	vaddr = (void __iomem *)engine->buffer->vaddr;

	for (i = backwards; i; --i) {
		/*
		 * Be paranoid and presume the hw has gone off into the wild -
		 * our ring is smaller than what the hardware (and hence
		 * HEAD_ADDR) allows. Also handles wrap-around.
		 */
		head &= engine->buffer->size - 1;

		/* This here seems to blow up */
		cmd = ioread32(vaddr + head);
		if (cmd == ipehr)
			break;

		head -= 4;
	}

	if (!i)
		return NULL;

	*seqno = ioread32(vaddr + head + 4) + 1;
	if (INTEL_GEN(dev_priv) >= 8) {
		offset = ioread32(vaddr + head + 12);
		offset <<= 32;
		offset |= ioread32(vaddr + head + 8);
	}
	return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
}
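
/*
 * For reference, the dwords read above relative to the matched command:
 * +4 holds the seqno being waited on (stored as wait-seqno - 1, hence
 * the +1), and on gen8+ the +8/+12 pair holds the low/high halves of
 * the signaller's GGTT offset.
 */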

static int semaphore_passed(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_engine_cs *signaller;
	u32 seqno;

	engine->hangcheck.deadlock++;

	signaller = semaphore_waits_for(engine, &seqno);
	if (signaller == NULL)
		return -1;

	if (IS_ERR(signaller))
		return 0;

	/* Prevent pathological recursion due to driver bugs */
	if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
		return -1;

	if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
		return 1;

	/* cursory check for an unkickable deadlock */
	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
	    semaphore_passed(signaller) < 0)
		return -1;

	return 0;
}

static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv)
		engine->hangcheck.deadlock = 0;
}

static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone)
{
	u32 tmp = current_instdone | *old_instdone;
	bool unchanged;

	unchanged = tmp == *old_instdone;
	*old_instdone |= tmp;

	return unchanged;
}
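
/*
 * Worked example with hypothetical values: *old == 0x3 and a current
 * sample of 0x1 gives tmp == 0x3, so the call reports "unchanged" -
 * bits that were already seen set don't count, only a bit set for the
 * first time (an undone -> done transition) registers as progress.
 */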

static bool subunits_stuck(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_instdone instdone;
	struct intel_instdone *accu_instdone = &engine->hangcheck.instdone;
	bool stuck;
	int slice;
	int subslice;

	if (engine->id != RCS)
		return true;

	i915_get_engine_instdone(dev_priv, RCS, &instdone);

	/* There might be unstable subunit states even when
	 * actual head is not moving. Filter out the unstable ones by
	 * accumulating the undone -> done transitions and only
	 * consider those as progress.
	 */
	stuck = instdone_unchanged(instdone.instdone,
				   &accu_instdone->instdone);
	stuck &= instdone_unchanged(instdone.slice_common,
				    &accu_instdone->slice_common);

	for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
		stuck &= instdone_unchanged(instdone.sampler[slice][subslice],
					    &accu_instdone->sampler[slice][subslice]);
		stuck &= instdone_unchanged(instdone.row[slice][subslice],
					    &accu_instdone->row[slice][subslice]);
	}

	return stuck;
}

static enum intel_engine_hangcheck_action
head_stuck(struct intel_engine_cs *engine, u64 acthd)
{
	if (acthd != engine->hangcheck.acthd) {

		/* Clear subunit states on head movement */
		memset(&engine->hangcheck.instdone, 0,
		       sizeof(engine->hangcheck.instdone));

		return HANGCHECK_ACTIVE;
	}

	if (!subunits_stuck(engine))
		return HANGCHECK_ACTIVE;

	return HANGCHECK_HUNG;
}

static enum intel_engine_hangcheck_action
engine_stuck(struct intel_engine_cs *engine, u64 acthd)
{
	struct drm_i915_private *dev_priv = engine->i915;
	enum intel_engine_hangcheck_action ha;
	u32 tmp;

	ha = head_stuck(engine, acthd);
	if (ha != HANGCHECK_HUNG)
		return ha;

	if (IS_GEN2(dev_priv))
		return HANGCHECK_HUNG;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
	tmp = I915_READ_CTL(engine);
	if (tmp & RING_WAIT) {
		i915_handle_error(dev_priv, 0,
				  "Kicking stuck wait on %s",
				  engine->name);
		I915_WRITE_CTL(engine, tmp);
		return HANGCHECK_KICK;
	}

	if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(engine)) {
		default:
			return HANGCHECK_HUNG;
		case 1:
			i915_handle_error(dev_priv, 0,
					  "Kicking stuck semaphore on %s",
					  engine->name);
			I915_WRITE_CTL(engine, tmp);
			return HANGCHECK_KICK;
		case 0:
			return HANGCHECK_WAIT;
		}
	}

	return HANGCHECK_HUNG;
}

/*
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track of seqno progress per
 * ring and if there is no progress, the hangcheck score for that ring
 * is increased. Further, acthd is inspected to see if the ring is
 * stuck. If it is, we kick the ring. If we see no progress on three
 * subsequent calls we assume the chip is wedged and try to fix it by
 * resetting the chip.
 */
static void i915_hangcheck_elapsed(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     gpu_error.hangcheck_work.work);
	struct intel_engine_cs *engine;
	unsigned int hung = 0, stuck = 0;
	int busy_count = 0;
#define BUSY 1
#define KICK 5
#define HUNG 20
#define ACTIVE_DECAY 15

	if (!i915.enable_hangcheck)
		return;

	if (!READ_ONCE(dev_priv->gt.awake))
		return;

	/* As enabling the GPU requires fairly extensive mmio access,
	 * periodically arm the mmio checker to see if we are triggering
	 * any invalid access.
	 */
	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);

	for_each_engine(engine, dev_priv) {
		bool busy = intel_engine_has_waiter(engine);
		u64 acthd;
		u32 seqno;
		u32 submit;

		semaphore_clear_deadlocks(dev_priv);

		/* We don't strictly need an irq-barrier here, as we are not
		 * serving an interrupt request, be paranoid in case the
		 * barrier has side-effects (such as preventing a broken
		 * cacheline snoop) and so be sure that we can see the seqno
		 * advance. If the seqno should stick, due to a stale
		 * cacheline, we would erroneously declare the GPU hung.
		 */
		if (engine->irq_seqno_barrier)
			engine->irq_seqno_barrier(engine);

		acthd = intel_engine_get_active_head(engine);
		seqno = intel_engine_get_seqno(engine);
		submit = READ_ONCE(engine->last_submitted_seqno);

		if (engine->hangcheck.seqno == seqno) {
			if (i915_seqno_passed(seqno, submit)) {
				engine->hangcheck.action = HANGCHECK_IDLE;
			} else {
				/* We always increment the hangcheck score
				 * if the engine is busy and still processing
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring is if this
				 * engine is in a legitimate wait for another
				 * engine. In that case the waiting engine is a
				 * victim and we want to be sure we catch the
				 * right culprit. Then every time we do kick
				 * the ring, add a small increment to the
				 * score so that we can catch a batch that is
				 * being repeatedly kicked and so responsible
				 * for stalling the machine.
				 */
				engine->hangcheck.action =
					engine_stuck(engine, acthd);

				switch (engine->hangcheck.action) {
				case HANGCHECK_IDLE:
				case HANGCHECK_WAIT:
					break;
				case HANGCHECK_ACTIVE:
					engine->hangcheck.score += BUSY;
					break;
				case HANGCHECK_KICK:
					engine->hangcheck.score += KICK;
					break;
				case HANGCHECK_HUNG:
					engine->hangcheck.score += HUNG;
					break;
				}
			}

			if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
				hung |= intel_engine_flag(engine);
				if (engine->hangcheck.action != HANGCHECK_HUNG)
					stuck |= intel_engine_flag(engine);
			}
		} else {
			engine->hangcheck.action = HANGCHECK_ACTIVE;

			/* Gradually reduce the count so that we catch DoS
			 * attempts across multiple batches.
			 */
			if (engine->hangcheck.score > 0)
				engine->hangcheck.score -= ACTIVE_DECAY;
			if (engine->hangcheck.score < 0)
				engine->hangcheck.score = 0;

			/* Clear head and subunit states on seqno movement */
			acthd = 0;

			memset(&engine->hangcheck.instdone, 0,
			       sizeof(engine->hangcheck.instdone));
		}

		engine->hangcheck.seqno = seqno;
		engine->hangcheck.acthd = acthd;
		busy_count += busy;
	}

	if (hung) {
		char msg[80];
		unsigned int tmp;
		int len;

		/* If some rings hung but others were still busy, only
		 * blame the hanging rings in the synopsis.
		 */
		if (stuck != hung)
			hung &= ~stuck;
		len = scnprintf(msg, sizeof(msg),
				"%s on ", stuck == hung ? "No progress" : "Hang");
		for_each_engine_masked(engine, dev_priv, hung, tmp)
			len += scnprintf(msg + len, sizeof(msg) - len,
					 "%s, ", engine->name);
		msg[len-2] = '\0';

		return i915_handle_error(dev_priv, hung, msg);
	}

	/* Reset timer in case GPU hangs without another request being added */
	if (busy_count)
		i915_queue_hangcheck(dev_priv);
}
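
/*
 * Worked example of the scoring above, assuming the
 * HANGCHECK_SCORE_RING_HUNG threshold defined in i915_drv.h: a ring
 * sitting on the same seqno accrues BUSY(+1), KICK(+5) or HUNG(+20) per
 * sample, so a truly dead ring crosses the threshold within a handful
 * of hangcheck periods, while any seqno movement claws back
 * ACTIVE_DECAY(-15) and keeps bursty-but-live workloads below it.
 */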

static void ibx_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (HAS_PCH_NOP(dev))
		return;

	GEN5_IRQ_RESET(SDE);

	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
		I915_WRITE(SERR_INT, 0xffffffff);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * This function needs to be called before interrupts are enabled.
 */
static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (HAS_PCH_NOP(dev))
		return;

	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}
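
/*
 * Sketch of the IER/IMR split relied on above (hypothetical helper, not
 * an i915 function): with every source enabled in IER up front, runtime
 * masking only ever touches IMR, so the handler's SDEIER
 * save/clear/restore dance never races with enable-side updates.
 */
static void __maybe_unused unmask_one_source(void __iomem *imr, u32 bit)
{
	u32 val = readl(imr);

	writel(val & ~bit, imr);	/* 0 in IMR == unmasked */
	readl(imr);			/* posting read */
}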

static void gen5_gt_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	GEN5_IRQ_RESET(GT);
	if (INTEL_INFO(dev)->gen >= 6)
		GEN5_IRQ_RESET(GEN6_PM);
}

static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	if (IS_CHERRYVIEW(dev_priv))
		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);

	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(PIPESTAT(pipe),
			   PIPE_FIFO_UNDERRUN_STATUS |
			   PIPESTAT_INT_STATUS_MASK);
		dev_priv->pipestat_irq_mask[pipe] = 0;
	}

	GEN5_IRQ_RESET(VLV_);
	dev_priv->irq_mask = ~0;
}

static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 enable_mask;
	enum pipe pipe;

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
	if (IS_CHERRYVIEW(dev_priv))
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;

	WARN_ON(dev_priv->irq_mask != ~0);

	dev_priv->irq_mask = ~enable_mask;

	GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
}

/* drm_dma.h hooks
*/
static void ironlake_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(HWSTAM, 0xffffffff);

	GEN5_IRQ_RESET(DE);
	if (IS_GEN7(dev))
		I915_WRITE(GEN7_ERR_INT, 0xffffffff);

	gen5_gt_irq_reset(dev);

	ibx_irq_reset(dev);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(VLV_MASTER_IER, 0);
	POSTING_READ(VLV_MASTER_IER);

	gen5_gt_irq_reset(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	GEN8_IRQ_RESET_NDX(GT, 0);
	GEN8_IRQ_RESET_NDX(GT, 1);
	GEN8_IRQ_RESET_NDX(GT, 2);
	GEN8_IRQ_RESET_NDX(GT, 3);
}

static void gen8_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);

	GEN5_IRQ_RESET(GEN8_DE_PORT_);
	GEN5_IRQ_RESET(GEN8_DE_MISC_);
	GEN5_IRQ_RESET(GEN8_PCU_);

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_reset(dev);
}

void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     unsigned int pipe_mask)
{
	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);
	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
				  dev_priv->de_irq_mask[pipe],
				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
				     unsigned int pipe_mask)
{
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);
	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);
}

static void cherryview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}

static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, hotplug, enabled_irqs;

	if (HAS_PCH_IBX(dev_priv)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on LPT+.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	/*
	 * When CPU and PCH are on the same package, port A
	 * HPD must be enabled in both north and south.
	 */
	if (HAS_PCH_LPT_LP(dev_priv))
		hotplug |= PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, hotplug, enabled_irqs;

	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/* Enable digital hotplug on the PCH */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
		PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);

	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
	hotplug |= PORTE_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}

static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, hotplug, enabled_irqs;

	if (INTEL_GEN(dev_priv) >= 8) {
		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);

		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else if (INTEL_GEN(dev_priv) >= 7) {
		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else {
		hotplug_irqs = DE_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	}

	/*
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on HSW+.
	 */
	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);

	ibx_hpd_irq_setup(dev_priv);
}

static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, hotplug, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;

	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
		PORTA_HOTPLUG_ENABLE;

	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
		      hotplug, enabled_irqs);
	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;

	/*
	 * For BXT the invert bit has to be set based on the AOB design
	 * used for the HPD detection logic; update it based on the VBT
	 * fields.
	 */
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
		hotplug |= BXT_DDIA_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
		hotplug |= BXT_DDIB_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
		hotplug |= BXT_DDIC_HPD_INVERT;

	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void ibx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 mask;

	if (HAS_PCH_NOP(dev))
		return;

	if (HAS_PCH_IBX(dev))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;

	gen5_assert_iir_is_zero(dev_priv, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);
}

static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
		gt_irqs |= GT_PARITY_ERROR(dev);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev)) {
		gt_irqs |= ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);

	if (INTEL_INFO(dev)->gen >= 6) {
		/*
		 * RPS interrupts will get enabled/disabled on demand when RPS
		 * itself is enabled/disabled.
		 */
		if (HAS_VEBOX(dev))
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;

		dev_priv->pm_irq_mask = 0xffffffff;
		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
	}
}
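
/*
 * GEN5_IRQ_INIT() (defined earlier in this file) is the mirror image of
 * the reset macro: it first asserts that IIR is already clear and then
 * programs IER/IMR, roughly (a sketch, not the exact macro body):
 *
 *	gen5_assert_iir_is_zero(dev_priv, IIR);
 *	I915_WRITE(IER, ier_val);
 *	I915_WRITE(IMR, imr_val);
 *	POSTING_READ(IMR);
 */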

static int ironlake_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 display_mask, extra_mask;

	if (INTEL_INFO(dev)->gen >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
				DE_PLANEB_FLIP_DONE_IVB |
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A |
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
				DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_DP_A_HOTPLUG);
	}

	dev_priv->irq_mask = ~display_mask;

	I915_WRITE(HWSTAM, 0xeffe);

	ibx_irq_pre_postinstall(dev);

	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);

	gen5_gt_irq_postinstall(dev);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irq(&dev_priv->irq_lock);
		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}

	return 0;
}
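
/*
 * Ordering note: ibx_irq_pre_postinstall() (earlier in this file)
 * enables all PCH interrupt sources in SDEIER up front; the interrupt
 * handler itself toggles SDEIER to work around missed PCH interrupts,
 * so it cannot be changed once interrupts are live. Individual sources
 * are then masked as needed via SDEIMR in ibx_irq_postinstall().
 */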

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv)) {
		vlv_display_irq_reset(dev_priv);
		vlv_display_irq_postinstall(dev_priv);
	}
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		vlv_display_irq_reset(dev_priv);
}
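
/*
 * The two helpers above are expected to be called from the display
 * power well code with irq_lock already held (hence the asserts); a
 * sketch of the calling pattern:
 *
 *	spin_lock_irq(&dev_priv->irq_lock);
 *	valleyview_enable_display_irqs(dev_priv);
 *	spin_unlock_irq(&dev_priv->irq_lock);
 */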

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen5_gt_irq_postinstall(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(VLV_MASTER_IER);

	return 0;
}

static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
		};

	if (HAS_L3_DPF(dev_priv))
		gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	dev_priv->pm_irq_mask = 0xffffffff;
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled.
	 */
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	uint32_t de_pipe_enables;
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_MISC_GSE;
	enum pipe pipe;

	if (INTEL_INFO(dev_priv)->gen >= 9) {
		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
				  GEN9_AUX_CHANNEL_D;
		if (IS_BROXTON(dev_priv))
			de_port_masked |= BXT_DE_PORT_GMBUS;
	} else {
		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	}

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

	de_port_enables = de_port_masked;
	if (IS_BROXTON(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;

	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);

	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
}

static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_postinstall(dev);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen8_gt_irq_postinstall(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static void gen8_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!dev_priv)
		return;

	gen8_irq_reset(dev);
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!dev_priv)
		return;

	I915_WRITE(VLV_MASTER_IER, 0);
	POSTING_READ(VLV_MASTER_IER);

	gen5_gt_irq_reset(dev);

	I915_WRITE(HWSTAM, 0xffffffff);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void cherryview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!dev_priv)
		return;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!dev_priv)
		return;

	ironlake_irq_reset(dev);
}

static void i8xx_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	/* Interrupt setup is already guaranteed to be single-threaded; this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv,
			       int plane, int pipe, u32 iir)
{
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev_priv, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		goto check_page_flip;

	intel_finish_page_flip_cs(dev_priv, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev_priv, pipe);
	return false;
}
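
/*
 * The same FlipDone-by-ISR-transition trick is repeated below in
 * i915_handle_vblank() for gen3/gen4; the only difference is that it
 * reads the 32-bit ISR instead of the 16-bit gen2 variant.
 */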

static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u16 iir, new_iir;
	u32 pipe_stats[2];
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	irqreturn_t ret;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	ret = IRQ_NONE;
	iir = I915_READ16(IIR);
	if (iir == 0)
		goto out;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff)
				I915_WRITE(reg, pipe_stats[pipe]);
		}
		spin_unlock(&dev_priv->irq_lock);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev_priv))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i8xx_handle_vblank(dev_priv, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev_priv, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		iir = new_iir;
	}
	ret = IRQ_HANDLED;

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i8xx_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

static void i915_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev_priv);

	/* Interrupt setup is already guaranteed to be single-threaded; this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_i915_private *dev_priv,
			       int plane, int pipe, u32 iir)
{
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev_priv, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		goto check_page_flip;

	intel_finish_page_flip_cs(dev_priv, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev_priv, pipe);
	return false;
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
			if (hotplug_status)
				i9xx_hpd_irq_handler(dev_priv, hotplug_status);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev_priv))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev_priv, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev_priv, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev_priv);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i915_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded; this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev_priv);

	return 0;
}

static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/* Programming the CRT detection parameters tends
	   to generate a spurious hotplug event about three
	   seconds later.  So just do it once.
	*/
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
			if (hotplug_status)
				i9xx_hpd_irq_handler(dev_priv, hotplug_status);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev_priv, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev_priv, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev_priv);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev_priv);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i965_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	if (!dev_priv)
		return;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	dev_priv->rps.pm_intr_keep = 0;

	/*
	 * SNB and IVB definitely hard hang, and VLV and CHV may hard hang,
	 * on a looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
		dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_GUC;

	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
			  i915_hangcheck_elapsed);

	if (IS_GEN2(dev_priv)) {
		/* Gen2 doesn't have a hardware frame counter */
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = drm_vblank_no_hw_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (IS_BROXTON(dev))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev))
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else {
		if (IS_GEN2(dev_priv)) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (IS_GEN3(dev_priv)) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
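
/*
 * The vtable wired up above is consumed by the DRM core: roughly,
 * drm_irq_install() calls ->irq_preinstall, registers ->irq_handler
 * with request_irq() and then calls ->irq_postinstall, while
 * drm_irq_uninstall() calls ->irq_uninstall and frees the line (see
 * drm_irq.c for the authoritative sequence).
 */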

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
}
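
/*
 * Illustrative driver-load ordering (a sketch; the real call sites live
 * in i915_drv.c, and the hotplug step is named here only as an example):
 *
 *	intel_irq_init(dev_priv);		- vtables and work items
 *	ret = intel_irq_install(dev_priv);	- hardware interrupt on
 *	intel_hpd_init(dev_priv);		- hotplug enabled last
 */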

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(&dev_priv->drm);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
	dev_priv->pm.irqs_enabled = false;
	synchronize_irq(dev_priv->drm.irq);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
	dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
}
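
/*
 * Typical runtime-PM pairing (a sketch; the actual call sites are in
 * the suspend/resume paths of i915_drv.c):
 *
 *	intel_runtime_pm_disable_interrupts(dev_priv);
 *	... device is powered down and later up again ...
 *	intel_runtime_pm_enable_interrupts(dev_priv);
 */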