/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};
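
/*
 * The tables above translate HPD pins (HPD_PORT_A etc.) into the
 * platform-specific hotplug interrupt bits: trigger bits for the
 * ilk/ivb/bdw/ibx/cpt/spt/bxt tables, and PORT_HOTPLUG enable/status
 * register bits for the i915/g4x tables.
 */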

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)
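
/*
 * For illustration: these reset/init helpers are invoked from the interrupt
 * (pre/post)install hooks further down this file (outside this excerpt),
 * roughly along the lines of:
 *
 *	GEN5_IRQ_RESET(GT);
 *	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
 */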

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid that read-modify-write cycles
 * interfere, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    uint32_t interrupt_mask,
			    uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
	POSTING_READ_FW(GTIMR);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}
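
/*
 * On gen8+ the PM/RPS interrupts are routed through GT interrupt group 2,
 * so the helpers above pick the GEN8_GT_*(2) registers there and the
 * dedicated GEN6_PM* registers on earlier gens.
 */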

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_imr;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_imr) {
		dev_priv->pm_imr = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_mask_pm_irq(dev_priv, mask);
}

void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	assert_spin_locked(&dev_priv->irq_lock);

	I915_WRITE(reg, reset_mask);
	I915_WRITE(reg, reset_mask);
	POSTING_READ(reg);
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	dev_priv->pm_ier |= enable_mask;
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	gen6_unmask_pm_irq(dev_priv, enable_mask);
	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	dev_priv->pm_ier &= ~disable_mask;
	__gen6_mask_pm_irq(dev_priv, disable_mask);
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	/* though a barrier is missing here, we don't really need one */
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	if (READ_ONCE(dev_priv->rps.interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON_ONCE(dev_priv->rps.pm_iir);
	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	return (mask & ~dev_priv->rps.pm_intr_keep);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	if (!READ_ONCE(dev_priv->rps.interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	/* Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&dev_priv->rps.work);
	gen6_reset_rps_interrupts(dev_priv);
}

420
/**
421 422 423 424 425
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483
/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

484 485 486 487 488 489
/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
490 491 492
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
493 494 495 496 497
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

498 499
	WARN_ON(enabled_irq_mask & ~interrupt_mask);

500 501
	assert_spin_locked(&dev_priv->irq_lock);

502
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
503 504
		return;

505 506 507
	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
508

static void
510 511
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
512
{
513
	i915_reg_t reg = PIPESTAT(pipe);
514
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
515

516
	assert_spin_locked(&dev_priv->irq_lock);
517
	WARN_ON(!intel_irqs_enabled(dev_priv));
518

519 520 521 522
	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
523 524 525
		return;

	if ((pipestat & enable_mask) == enable_mask)
526 527
		return;

528 529
	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

530
	/* Enable the interrupt, clear any pending status */
531
	pipestat |= enable_mask | status_mask;
532 533
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
534 535
}

static void
537 538
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		        u32 enable_mask, u32 status_mask)
539
{
540
	i915_reg_t reg = PIPESTAT(pipe);
541
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
542

543
	assert_spin_locked(&dev_priv->irq_lock);
544
	WARN_ON(!intel_irqs_enabled(dev_priv));
545

546 547 548 549
	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
550 551
		return;

552 553 554
	if ((pipestat & enable_mask) == 0)
		return;

555 556
	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

557
	pipestat &= ~enable_mask;
558 559
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
560 561
}

562 563 564 565 566
static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
567 568
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
569 570 571
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
572 573 574 575 576 577
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;
578 579 580 581 582 583 584 585 586 587 588 589

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

590 591 592 593 594 595
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

596
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
597
		enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
598 599 600
							   status_mask);
	else
		enable_mask = status_mask << 16;
601 602 603 604 605 606 607 608 609
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

610
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
611
		enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
612 613 614
							   status_mask);
	else
		enable_mask = status_mask << 16;
615 616 617
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

618
/**
619
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
620
 * @dev_priv: i915 device private
621
 */
622
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
623
{
624
	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
625 626
		return;

627
	spin_lock_irq(&dev_priv->irq_lock);
628

629
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
630
	if (INTEL_GEN(dev_priv) >= 4)
631
		i915_enable_pipestat(dev_priv, PIPE_A,
632
				     PIPE_LEGACY_BLC_EVENT_STATUS);
633

634
	spin_unlock_irq(&dev_priv->irq_lock);
635 636
}

637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686
/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

687 688 689
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
690
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
691
{
692
	struct drm_i915_private *dev_priv = to_i915(dev);
693
	i915_reg_t high_frame, low_frame;
694
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
695 696
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
697
	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
698

699 700 701 702 703
	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
704

705 706 707 708 709 710
	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

711 712
	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);
713

714 715 716 717 718 719
	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
720
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
721
		low   = I915_READ(low_frame);
722
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
723 724
	} while (high1 != high2);

725
	high1 >>= PIPE_FRAME_HIGH_SHIFT;
726
	pixel = low & PIPE_PIXEL_MASK;
727
	low >>= PIPE_FRAME_LOW_SHIFT;
728 729 730 731 732 733

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
734
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
735 736
}

737
static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
738
{
739
	struct drm_i915_private *dev_priv = to_i915(dev);
740

741
	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
742 743
}

744
/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
745 746 747
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
748
	struct drm_i915_private *dev_priv = to_i915(dev);
749
	const struct drm_display_mode *mode = &crtc->base.hwmode;
750
	enum pipe pipe = crtc->pipe;
751
	int position, vtotal;
752

753
	vtotal = mode->crtc_vtotal;
754 755 756
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

757
	if (IS_GEN2(dev_priv))
758
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
759
	else
760
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
761

762 763 764 765 766 767 768 769 770 771 772 773
	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank.  So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem.  We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
774
	if (HAS_DDI(dev_priv) && !position) {
775 776 777 778 779 780 781 782 783 784 785 786 787
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
				DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

788
	/*
789 790
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
791
	 */
792
	return (position + crtc->scanline_offset) % vtotal;
793 794
}

795
static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
796
				    unsigned int flags, int *vpos, int *hpos,
797 798
				    ktime_t *stime, ktime_t *etime,
				    const struct drm_display_mode *mode)
799
{
800
	struct drm_i915_private *dev_priv = to_i915(dev);
801 802
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
803
	int position;
804
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
805 806
	bool in_vbl = true;
	int ret = 0;
807
	unsigned long irqflags;
808

809
	if (WARN_ON(!mode->crtc_clock)) {
810
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
811
				 "pipe %c\n", pipe_name(pipe));
812 813 814
		return 0;
	}

815
	htotal = mode->crtc_htotal;
816
	hsync_start = mode->crtc_hsync_start;
817 818 819
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;
820

821 822 823 824 825 826
	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

827 828
	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

829 830 831 832 833 834
	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
835

836 837 838 839 840 841
	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

842
	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
843 844 845
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
846
		position = __intel_get_crtc_scanline(intel_crtc);
847 848 849 850 851
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
852
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
853

854 855 856 857
		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;
858

859 860 861 862 863 864 865 866 867 868 869 870
		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

871 872 873 874 875 876 877 878 879 880
		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
881 882
	}

883 884 885 886 887 888 889 890
	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

891 892 893 894 895 896 897 898 899 900 901 902
	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;
903

904
	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
905 906 907 908 909 910
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}
911 912 913

	/* In vblank? */
	if (in_vbl)
914
		ret |= DRM_SCANOUTPOS_IN_VBLANK;
915 916 917 918

	return ret;
}

919 920
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
921
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
922 923 924 925 926 927 928 929 930 931
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

932
static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
933 934 935 936
			      int *max_error,
			      struct timeval *vblank_time,
			      unsigned flags)
{
937
	struct drm_crtc *crtc;
938

939 940
	if (pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
941 942 943 944
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
945 946
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
947
		DRM_ERROR("Invalid crtc %u\n", pipe);
948 949 950
		return -EINVAL;
	}

951
	if (!crtc->hwmode.crtc_clock) {
952
		DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
953 954
		return -EBUSY;
	}
955 956

	/* Helper routine in DRM core does all the work: */
957 958
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
959
						     &crtc->hwmode);
960 961
}

962
static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
963
{
964
	u32 busy_up, busy_down, max_avg, min_avg;
965 966
	u8 new_delay;

967
	spin_lock(&mchdev_lock);
968

969 970
	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

971
	new_delay = dev_priv->ips.cur_delay;
972

973
	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
974 975
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
976 977 978 979
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
980
	if (busy_up > max_avg) {
981 982 983 984
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
985
	} else if (busy_down < min_avg) {
986 987 988 989
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
990 991
	}

992
	if (ironlake_set_drps(dev_priv, new_delay))
993
		dev_priv->ips.cur_delay = new_delay;
994

995
	spin_unlock(&mchdev_lock);
996

997 998 999
	return;
}

1000
static void notify_ring(struct intel_engine_cs *engine)
1001
{
1002
	smp_store_mb(engine->breadcrumbs.irq_posted, true);
1003
	if (intel_engine_wakeup(engine))
1004
		trace_i915_gem_request_notify(engine);
1005 1006
}

1007 1008
static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
1009
{
1010 1011 1012 1013
	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}
1014

1015 1016 1017 1018 1019 1020
static bool vlv_c0_above(struct drm_i915_private *dev_priv,
			 const struct intel_rps_ei *old,
			 const struct intel_rps_ei *now,
			 int threshold)
{
	u64 time, c0;
1021
	unsigned int mul = 100;
1022

1023 1024
	if (old->cz_clock == 0)
		return false;
1025

1026 1027 1028
	if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
		mul <<= 8;

1029
	time = now->cz_clock - old->cz_clock;
1030
	time *= threshold * dev_priv->czclk_freq;
1031

1032 1033 1034
	/* Workload can be split between render + media, e.g. SwapBuffers
	 * being blitted in X after being rendered in mesa. To account for
	 * this we need to combine both engines into our activity counter.
1035
	 */
1036 1037
	c0 = now->render_c0 - old->render_c0;
	c0 += now->media_c0 - old->media_c0;
1038
	c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
1039

1040
	return c0 >= time;
1041 1042
}

1043
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
1044
{
1045 1046 1047
	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
}
1048

1049 1050 1051 1052
static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps_ei now;
	u32 events = 0;
1053

1054
	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
1055
		return 0;
1056

1057 1058 1059
	vlv_c0_read(dev_priv, &now);
	if (now.cz_clock == 0)
		return 0;
1060

1061 1062 1063
	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
		if (!vlv_c0_above(dev_priv,
				  &dev_priv->rps.down_ei, &now,
1064
				  dev_priv->rps.down_threshold))
1065 1066 1067
			events |= GEN6_PM_RP_DOWN_THRESHOLD;
		dev_priv->rps.down_ei = now;
	}
1068

1069 1070 1071
	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		if (vlv_c0_above(dev_priv,
				 &dev_priv->rps.up_ei, &now,
1072
				 dev_priv->rps.up_threshold))
1073 1074
			events |= GEN6_PM_RP_UP_THRESHOLD;
		dev_priv->rps.up_ei = now;
1075 1076
	}

1077
	return events;
1078 1079
}

1080 1081
static bool any_waiters(struct drm_i915_private *dev_priv)
{
1082
	struct intel_engine_cs *engine;
1083
	enum intel_engine_id id;
1084

1085
	for_each_engine(engine, dev_priv, id)
1086
		if (intel_engine_has_waiter(engine))
1087 1088 1089 1090 1091
			return true;

	return false;
}

1092
static void gen6_pm_rps_work(struct work_struct *work)
1093
{
1094 1095
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
1096 1097
	bool client_boost;
	int new_delay, adj, min, max;
	u32 pm_iir;
1099

1100
	spin_lock_irq(&dev_priv->irq_lock);
	/* Speed up work cancellation while disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}
1106

1107 1108
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
1109
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
1110
	gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
1111 1112
	client_boost = dev_priv->rps.client_boost;
	dev_priv->rps.client_boost = false;
1113
	spin_unlock_irq(&dev_priv->irq_lock);
1114

1115
	/* Make sure we didn't queue anything we're not going to process. */
1116
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
1117

1118
	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
1119
		return;
1120

1121
	mutex_lock(&dev_priv->rps.hw_lock);
1122

1123 1124
	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

1125
	adj = dev_priv->rps.last_adj;
1126
	new_delay = dev_priv->rps.cur_freq;
1127 1128
	min = dev_priv->rps.min_freq_softlimit;
	max = dev_priv->rps.max_freq_softlimit;
1129 1130 1131 1132
	if (client_boost || any_waiters(dev_priv))
		max = dev_priv->rps.max_freq;
	if (client_boost && new_delay < dev_priv->rps.boost_freq) {
		new_delay = dev_priv->rps.boost_freq;
1133 1134
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1135 1136
		if (adj > 0)
			adj *= 2;
1137 1138
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
1139 1140 1141 1142
		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
1143
		if (new_delay < dev_priv->rps.efficient_freq - adj) {
1144
			new_delay = dev_priv->rps.efficient_freq;
1145 1146
			adj = 0;
		}
1147
	} else if (client_boost || any_waiters(dev_priv)) {
1148
		adj = 0;
1149
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1150 1151
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
1152
		else
1153
			new_delay = dev_priv->rps.min_freq_softlimit;
1154 1155 1156 1157
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
1158 1159
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
1160
	} else { /* unknown event */
1161
		adj = 0;
1162
	}
1163

1164 1165
	dev_priv->rps.last_adj = adj;

1166 1167 1168
	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
1169
	new_delay += adj;
1170
	new_delay = clamp_t(int, new_delay, min, max);
1171

1172
	intel_set_rps(dev_priv, new_delay);
1173

1174
	mutex_unlock(&dev_priv->rps.hw_lock);
1175 1176
}

1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely that the same row will go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
1189 1190
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
1191
	u32 error_status, row, bank, subbank;
1192
	char *parity_event[6];
1193
	uint32_t misccpctl;
1194
	uint8_t slice = 0;
1195 1196 1197 1198 1199

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
1200
	mutex_lock(&dev_priv->drm.struct_mutex);
1201

1202 1203 1204 1205
	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

1206 1207 1208 1209
	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

1210
	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1211
		i915_reg_t reg;
1212

1213
		slice--;
1214
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
1215
			break;
1216

1217
		dev_priv->l3_parity.which_slice &= ~(1<<slice);
1218

1219
		reg = GEN7_L3CDERRST1(slice);
1220

1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235
		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

1236
		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
1237
				   KOBJ_CHANGE, parity_event);
1238

1239 1240
		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);
1241

1242 1243 1244 1245 1246
		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}
1247

1248
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1249

1250 1251
out:
	WARN_ON(dev_priv->l3_parity.which_slice);
1252
	spin_lock_irq(&dev_priv->irq_lock);
1253
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1254
	spin_unlock_irq(&dev_priv->irq_lock);
1255

1256
	mutex_unlock(&dev_priv->drm.struct_mutex);
1257 1258
}

1259 1260
static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
					       u32 iir)
1261
{
1262
	if (!HAS_L3_DPF(dev_priv))
1263 1264
		return;

1265
	spin_lock(&dev_priv->irq_lock);
1266
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1267
	spin_unlock(&dev_priv->irq_lock);
1268

1269
	iir &= GT_PARITY_ERROR(dev_priv);
1270 1271 1272 1273 1274 1275
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

1276
	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1277 1278
}

1279
static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
1280 1281
			       u32 gt_iir)
{
1282
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
1283
		notify_ring(dev_priv->engine[RCS]);
1284
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
1285
		notify_ring(dev_priv->engine[VCS]);
1286 1287
}

1288
static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
1289 1290
			       u32 gt_iir)
{
1291
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
1292
		notify_ring(dev_priv->engine[RCS]);
1293
	if (gt_iir & GT_BSD_USER_INTERRUPT)
1294
		notify_ring(dev_priv->engine[VCS]);
1295
	if (gt_iir & GT_BLT_USER_INTERRUPT)
1296
		notify_ring(dev_priv->engine[BCS]);
1297

1298 1299
	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
1300 1301
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1302

1303 1304
	if (gt_iir & GT_PARITY_ERROR(dev_priv))
		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
1305 1306
}

1307
static __always_inline void
1308
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
1309 1310
{
	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
1311
		notify_ring(engine);
1312
	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
1313
		tasklet_schedule(&engine->irq_tasklet);
1314 1315
}

1316 1317 1318
static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
				   u32 master_ctl,
				   u32 gt_iir[4])
1319 1320 1321 1322
{
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1323 1324 1325
		gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
		if (gt_iir[0]) {
			I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
1326 1327 1328 1329 1330
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

1331
	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1332 1333 1334
		gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
		if (gt_iir[1]) {
			I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
1335
			ret = IRQ_HANDLED;
1336
		} else
1337
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
1338 1339
	}

1340
	if (master_ctl & GEN8_GT_VECS_IRQ) {
1341 1342 1343
		gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
		if (gt_iir[3]) {
			I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
1344 1345 1346 1347 1348
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

1349
	if (master_ctl & GEN8_GT_PM_IRQ) {
1350 1351
		gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
		if (gt_iir[2] & dev_priv->pm_rps_events) {
1352
			I915_WRITE_FW(GEN8_GT_IIR(2),
1353
				      gt_iir[2] & dev_priv->pm_rps_events);
1354
			ret = IRQ_HANDLED;
1355 1356 1357 1358
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

1359 1360 1361
	return ret;
}

1362 1363 1364 1365
static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
				u32 gt_iir[4])
{
	if (gt_iir[0]) {
1366
		gen8_cs_irq_handler(dev_priv->engine[RCS],
1367
				    gt_iir[0], GEN8_RCS_IRQ_SHIFT);
1368
		gen8_cs_irq_handler(dev_priv->engine[BCS],
1369 1370 1371 1372
				    gt_iir[0], GEN8_BCS_IRQ_SHIFT);
	}

	if (gt_iir[1]) {
1373
		gen8_cs_irq_handler(dev_priv->engine[VCS],
1374
				    gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
1375
		gen8_cs_irq_handler(dev_priv->engine[VCS2],
1376 1377 1378 1379
				    gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
	}

	if (gt_iir[3])
1380
		gen8_cs_irq_handler(dev_priv->engine[VECS],
1381 1382 1383 1384 1385 1386
				    gt_iir[3], GEN8_VECS_IRQ_SHIFT);

	if (gt_iir[2] & dev_priv->pm_rps_events)
		gen6_rps_irq_handler(dev_priv, gt_iir[2]);
}

1387 1388 1389 1390
static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
1391
		return val & PORTA_HOTPLUG_LONG_DETECT;
1392 1393 1394 1395 1396 1397 1398 1399 1400
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

1401 1402 1403 1404 1405 1406 1407 1408 1409 1410
static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426
static bool spt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

1427 1428 1429 1430 1431 1432 1433 1434 1435 1436
static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

1437
static bool pch_port_hotplug_long_detect(enum port port, u32 val)
1438 1439 1440
{
	switch (port) {
	case PORT_B:
1441
		return val & PORTB_HOTPLUG_LONG_DETECT;
1442
	case PORT_C:
1443
		return val & PORTC_HOTPLUG_LONG_DETECT;
1444
	case PORT_D:
1445 1446 1447
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
1448 1449 1450
	}
}

1451
static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
1452 1453 1454
{
	switch (port) {
	case PORT_B:
1455
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1456
	case PORT_C:
1457
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1458
	case PORT_D:
1459 1460 1461
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
1462 1463 1464
	}
}

1465 1466 1467 1468 1469 1470 1471
/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
1472
static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
1473
			     u32 hotplug_trigger, u32 dig_hotplug_reg,
1474 1475
			     const u32 hpd[HPD_NUM_PINS],
			     bool long_pulse_detect(enum port port, u32 val))
1476
{
1477
	enum port port;
1478 1479 1480
	int i;

	for_each_hpd_pin(i) {
1481 1482
		if ((hpd[i] & hotplug_trigger) == 0)
			continue;
1483

1484 1485
		*pin_mask |= BIT(i);

1486 1487 1488
		if (!intel_hpd_pin_to_port(i, &port))
			continue;

1489
		if (long_pulse_detect(port, dig_hotplug_reg))
1490
			*long_mask |= BIT(i);
1491 1492 1493 1494 1495 1496 1497
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask);

}

1498
static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
1499
{
1500
	wake_up_all(&dev_priv->gmbus_wait_queue);
1501 1502
}

1503
static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
1504
{
1505
	wake_up_all(&dev_priv->gmbus_wait_queue);
1506 1507
}

1508
#if defined(CONFIG_DEBUG_FS)
1509 1510
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
1511 1512 1513
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
1514 1515 1516
{
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
1517
	int head, tail;
1518

1519 1520
	spin_lock(&pipe_crc->lock);

1521
	if (!pipe_crc->entries) {
1522
		spin_unlock(&pipe_crc->lock);
1523
		DRM_DEBUG_KMS("spurious interrupt\n");
1524 1525 1526
		return;
	}

1527 1528
	head = pipe_crc->head;
	tail = pipe_crc->tail;
1529 1530

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1531
		spin_unlock(&pipe_crc->lock);
1532 1533 1534 1535 1536
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];
1537

1538
	entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm,
1539
								 pipe);
1540 1541 1542 1543 1544
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;
1545 1546

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1547 1548 1549
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);
1550 1551

	wake_up_interruptible(&pipe_crc->wq);
1552
}
1553 1554
#else
static inline void
1555 1556
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
1557 1558 1559 1560 1561
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif

1562

1563 1564
static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
1566
	display_pipe_crc_irq_handler(dev_priv, pipe,
1567 1568
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

1571 1572
static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
1573
{
1574
	display_pipe_crc_irq_handler(dev_priv, pipe,
1575 1576 1577 1578 1579
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1580
}
1581

1582 1583
static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
1584
{
1585 1586
	uint32_t res1, res2;

1587
	if (INTEL_GEN(dev_priv) >= 3)
1588 1589 1590 1591
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

1592
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1593 1594 1595
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;
1596

1597
	display_pipe_crc_irq_handler(dev_priv, pipe,
1598 1599 1600 1601
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
1602
}
1603

1604 1605 1606 1607
/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1608
{
1609
	if (pm_iir & dev_priv->pm_rps_events) {
1610
		spin_lock(&dev_priv->irq_lock);
1611
		gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1614
			schedule_work(&dev_priv->rps.work);
		}
1616
		spin_unlock(&dev_priv->irq_lock);
1617 1618
	}

1619 1620 1621
	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

1622
	if (HAS_VEBOX(dev_priv)) {
1623
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1624
			notify_ring(dev_priv->engine[VECS]);

1626 1627
		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
1629 1630
}

1631
static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
1632
				     enum pipe pipe)
1633
{
1634 1635
	bool ret;

1636
	ret = drm_handle_vblank(&dev_priv->drm, pipe);
1637
	if (ret)
1638
		intel_finish_page_flip_mmio(dev_priv, pipe);
1639 1640

	return ret;
1641 1642
}

1643 1644
static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
					u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1645 1646 1647
{
	int pipe;

1648
	spin_lock(&dev_priv->irq_lock);
1649 1650 1651 1652 1653 1654

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

1655
	for_each_pipe(dev_priv, pipe) {
1656
		i915_reg_t reg;
1657
		u32 mask, iir_bit = 0;
1658

1659 1660 1661 1662 1663 1664 1665
		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */
1666 1667 1668

		/* fifo underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;
1669 1670 1671 1672 1673 1674 1675 1676

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
1677 1678 1679
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
1680 1681 1682 1683 1684
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
1685 1686 1687
			continue;

		reg = PIPESTAT(pipe);
1688 1689
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;
1690 1691 1692 1693

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
1694 1695
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
1696 1697
			I915_WRITE(reg, pipe_stats[pipe]);
	}
1698
	spin_unlock(&dev_priv->irq_lock);
1699 1700
}

static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev_priv, pipe))
			intel_check_page_flip(dev_priv, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
			intel_finish_page_flip_cs(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

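/*
 * Read and ack PORT_HOTPLUG_STAT; the returned status is decoded afterwards
 * by i9xx_hpd_irq_handler(), keeping the register ack separate from the
 * actual hotplug handling.
 */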
static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (hotplug_status)
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);

	return hotplug_status;
}

static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev_priv);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}
	}
}

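/*
 * Top-level VLV interrupt handler: with the master interrupt disabled, ack
 * every IIR source first and only then run the individual handlers, so a new
 * 0->1 edge is guaranteed if further interrupts arrive in the meantime (see
 * the theory comment below).
 */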
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
1768
	struct drm_device *dev = arg;
1769
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

1778
	do {
1779
		u32 iir, gt_iir, pm_iir;
1780
		u32 pipe_stats[I915_MAX_PIPES] = {};
1781
		u32 hotplug_status = 0;
1782
		u32 ier = 0;
1783

		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);
1786
		iir = I915_READ(VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1789
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
1806
		I915_WRITE(VLV_MASTER_IER, 0);
1807 1808
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

1815
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
1816
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1817

1818 1819
		/* Call regardless, as some status bits might not be
		 * signalled in iir */
1820
		valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);
1828

1829
		I915_WRITE(VLV_IER, ier);
1830 1831
		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
		POSTING_READ(VLV_MASTER_IER);
1832

1833
		if (gt_iir)
1834
			snb_gt_irq_handler(dev_priv, gt_iir);
1835 1836 1837
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);

1838
		if (hotplug_status)
1839
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1840

1841
		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1842
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

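/*
 * CHV variant of the handler above: same ack-then-handle ordering, but the
 * master control is GEN8_MASTER_IRQ and the GT interrupts use the gen8 layout.
 */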
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
1851
	struct drm_device *dev = arg;
1852
	struct drm_i915_private *dev_priv = to_i915(dev);
1853 1854
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

1861
	do {
1862
		u32 master_ctl, iir;
1863
		u32 gt_iir[4] = {};
1864
		u32 pipe_stats[I915_MAX_PIPES] = {};
1865
		u32 hotplug_status = 0;
1866 1867
		u32 ier = 0;

1868 1869
		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);
1870

1871 1872
		if (master_ctl == 0 && iir == 0)
			break;
1873

1874 1875
		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
1889
		I915_WRITE(GEN8_MASTER_IRQ, 0);
1890 1891
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);
1892

1893
		gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
1894

1895
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
1896
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1897

1898 1899
		/* Call regardless, as some status bits might not be
		 * signalled in iir */
1900
		valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1901

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

1909
		I915_WRITE(VLV_IER, ier);
1910
		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1911
		POSTING_READ(GEN8_MASTER_IRQ);
1912

1913 1914
		gen8_gt_irq_handler(dev_priv, gt_iir);

1915
		if (hotplug_status)
1916
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1917

1918
		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1919
	} while (0);
1920

1921 1922
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/*
	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
	 * unless we touch the hotplug register, even if hotplug_trigger is
	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
	 * errors.
	 */
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	if (!hotplug_trigger) {
		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
			PORTD_HOTPLUG_STATUS_MASK |
			PORTC_HOTPLUG_STATUS_MASK |
			PORTB_HOTPLUG_STATUS_MASK;
		dig_hotplug_reg &= ~mask;
	}

	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
	if (!hotplug_trigger)
		return;

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   pch_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
}

static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev_priv))
				ivb_pipe_crc_irq_handler(dev_priv, pipe);
			else
				hsw_pipe_crc_irq_handler(dev_priv, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev_priv);
}

2084
static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
	u32 pin_mask = 0, long_mask = 0;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   dig_hotplug_reg, hpd_spt,
2099
				   spt_port_hotplug_long_detect);
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug2_long_detect);
	}

	if (pin_mask)
2114
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2115 2116

	if (pch_iir & SDE_GMBUS_CPT)
2117
		gmbus_irq_handler(dev_priv);
2118 2119
}

2120 2121
static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   ilk_port_hotplug_long_detect);

2133
	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2134 2135
}

2136 2137
static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
2138
{
2139
	enum pipe pipe;
2140 2141
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

2142
	if (hotplug_trigger)
2143
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
2144 2145

	if (de_iir & DE_AUX_CHANNEL_A)
2146
		dp_aux_irq_handler(dev_priv);
2147 2148

	if (de_iir & DE_GSE)
2149
		intel_opregion_asle_intr(dev_priv);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

2154
	for_each_pipe(dev_priv, pipe) {
2155 2156 2157
		if (de_iir & DE_PIPE_VBLANK(pipe) &&
		    intel_pipe_handle_vblank(dev_priv, pipe))
			intel_check_page_flip(dev_priv, pipe);
2158

2159
		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2160
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2161

2162
		if (de_iir & DE_PIPE_CRC_DONE(pipe))
2163
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2164

2165
		/* plane/pipes map 1:1 on ilk+ */
2166
		if (de_iir & DE_PLANE_FLIP_DONE(pipe))
2167
			intel_finish_page_flip_cs(dev_priv, pipe);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

2174 2175
		if (HAS_PCH_CPT(dev_priv))
			cpt_irq_handler(dev_priv, pch_iir);
2176
		else
2177
			ibx_irq_handler(dev_priv, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

2183 2184
	if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev_priv);
2185 2186
}

2187 2188
static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
2189
{
2190
	enum pipe pipe;
2191 2192
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

2193
	if (hotplug_trigger)
2194
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
2195 2196

	if (de_iir & DE_ERR_INT_IVB)
2197
		ivb_err_int_handler(dev_priv);
2198 2199

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
2200
		dp_aux_irq_handler(dev_priv);
2201 2202

	if (de_iir & DE_GSE_IVB)
2203
		intel_opregion_asle_intr(dev_priv);
2204

2205
	for_each_pipe(dev_priv, pipe) {
2206 2207 2208
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
		    intel_pipe_handle_vblank(dev_priv, pipe))
			intel_check_page_flip(dev_priv, pipe);
2209 2210

		/* plane/pipes map 1:1 on ilk+ */
2211
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
2212
			intel_finish_page_flip_cs(dev_priv, pipe);
2213 2214 2215
	}

	/* check event from PCH */
2216
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2217 2218
		u32 pch_iir = I915_READ(SDEIIR);

2219
		cpt_irq_handler(dev_priv, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
2234
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2235
{
2236
	struct drm_device *dev = arg;
2237
	struct drm_i915_private *dev_priv = to_i915(dev);
2238
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2239
	irqreturn_t ret = IRQ_NONE;
2240

2241 2242 2243
	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

2244 2245 2246
	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

2247 2248 2249
	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2250
	POSTING_READ(DEIER);
2251

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
2257
	if (!HAS_PCH_NOP(dev_priv)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}
2262

2263 2264
	/* Find, clear, then process each source of interrupt */

2265
	gt_iir = I915_READ(GTIIR);
2266
	if (gt_iir) {
2267 2268
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
2269
		if (INTEL_GEN(dev_priv) >= 6)
2270
			snb_gt_irq_handler(dev_priv, gt_iir);
2271
		else
2272
			ilk_gt_irq_handler(dev_priv, gt_iir);
2273 2274
	}

2275 2276
	de_iir = I915_READ(DEIIR);
	if (de_iir) {
2277 2278
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
2279 2280
		if (INTEL_GEN(dev_priv) >= 7)
			ivb_display_irq_handler(dev_priv, de_iir);
2281
		else
2282
			ilk_display_irq_handler(dev_priv, de_iir);
2283 2284
	}

2285
	if (INTEL_GEN(dev_priv) >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
2290
			gen6_rps_irq_handler(dev_priv, pm_iir);
2291
		}
2292
	}
2293 2294 2295

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
2296
	if (!HAS_PCH_NOP(dev_priv)) {
2297 2298 2299
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}
2300

2301 2302 2303
	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	enable_rpm_wakeref_asserts(dev_priv);

2304 2305 2306
	return ret;
}

2307 2308
static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
2309
				const u32 hpd[HPD_NUM_PINS])
2310
{
2311
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2312

2313 2314
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2315

2316
	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2317
			   dig_hotplug_reg, hpd,
2318
			   bxt_port_hotplug_long_detect);
2319

2320
	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2321 2322
}

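/*
 * Display-side interrupt handler for gen8+: walks the MISC, PORT, per-pipe
 * and PCH IIRs selected by master_ctl, acking each register before handling
 * the bits that were set in it.
 */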
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2325 2326
{
	irqreturn_t ret = IRQ_NONE;
2327
	u32 iir;
2328
	enum pipe pipe;

2330
	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = I915_READ(GEN8_DE_MISC_IIR);
		if (iir) {
			I915_WRITE(GEN8_DE_MISC_IIR, iir);
2334
			ret = IRQ_HANDLED;
2335
			if (iir & GEN8_DE_MISC_GSE)
2336
				intel_opregion_asle_intr(dev_priv);
2337 2338
			else
				DRM_ERROR("Unexpected DE Misc interrupt\n");
2339
		}
2340 2341
		else
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2342 2343
	}

2344
	if (master_ctl & GEN8_DE_PORT_IRQ) {
2345 2346 2347
		iir = I915_READ(GEN8_DE_PORT_IIR);
		if (iir) {
			u32 tmp_mask;
2348
			bool found = false;
2349

2350
			I915_WRITE(GEN8_DE_PORT_IIR, iir);
2351
			ret = IRQ_HANDLED;

			tmp_mask = GEN8_AUX_CHANNEL_A;
			if (INTEL_INFO(dev_priv)->gen >= 9)
				tmp_mask |= GEN9_AUX_CHANNEL_B |
					    GEN9_AUX_CHANNEL_C |
					    GEN9_AUX_CHANNEL_D;

			if (iir & tmp_mask) {
2360
				dp_aux_irq_handler(dev_priv);
2361 2362 2363
				found = true;
			}

2364 2365 2366
			if (IS_BROXTON(dev_priv)) {
				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
				if (tmp_mask) {
2367 2368
					bxt_hpd_irq_handler(dev_priv, tmp_mask,
							    hpd_bxt);
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
				if (tmp_mask) {
2374 2375
					ilk_hpd_irq_handler(dev_priv,
							    tmp_mask, hpd_bdw);
2376 2377
					found = true;
				}
2378 2379
			}

2380 2381
			if (IS_BROXTON(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
				gmbus_irq_handler(dev_priv);
				found = true;
			}

2385
			if (!found)
2386
				DRM_ERROR("Unexpected DE Port interrupt\n");
2387
		}
2388 2389
		else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2390 2391
	}

2392
	for_each_pipe(dev_priv, pipe) {
2393
		u32 flip_done, fault_errors;
2394

2395 2396
		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;
2397

		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
			continue;
		}
2403

2404 2405
		ret = IRQ_HANDLED;
		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2406

2407 2408 2409
		if (iir & GEN8_PIPE_VBLANK &&
		    intel_pipe_handle_vblank(dev_priv, pipe))
			intel_check_page_flip(dev_priv, pipe);
2410

		flip_done = iir;
		if (INTEL_INFO(dev_priv)->gen >= 9)
			flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
		else
			flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
2416

2417
		if (flip_done)
2418
			intel_finish_page_flip_cs(dev_priv, pipe);
2419

2420
		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2421
			hsw_pipe_crc_irq_handler(dev_priv, pipe);
2422

2423 2424
		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2425

		fault_errors = iir;
		if (INTEL_INFO(dev_priv)->gen >= 9)
			fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		else
			fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2431

		if (fault_errors)
			DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
				  pipe_name(pipe),
				  fault_errors);
2436 2437
	}

2438
	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2439
	    master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
2445 2446 2447
		iir = I915_READ(SDEIIR);
		if (iir) {
			I915_WRITE(SDEIIR, iir);
2448
			ret = IRQ_HANDLED;
2449

2450
			if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
2451
				spt_irq_handler(dev_priv, iir);
2452
			else
2453
				cpt_irq_handler(dev_priv, iir);
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
		}
2461 2462
	}

	return ret;
}

static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
2469
	struct drm_i915_private *dev_priv = to_i915(dev);
2470
	u32 master_ctl;
2471
	u32 gt_iir[4] = {};
	irqreturn_t ret;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
	if (!master_ctl)
		return IRQ_NONE;

	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	/* Find, clear, then process each source of interrupt */
2488 2489
	ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
	gen8_gt_irq_handler(dev_priv, gt_iir);
2490 2491
	ret |= gen8_de_irq_handler(dev_priv, master_ctl);

2492 2493
	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ_FW(GEN8_MASTER_IRQ);
2494

2495 2496
	enable_rpm_wakeref_asserts(dev_priv);

2497 2498 2499
	return ret;
}

2500
static void i915_error_wake_up(struct drm_i915_private *dev_priv)
{
	/*
	 * Notify all waiters for GPU completion events that reset state has
	 * been changed, and that they need to restart their wait after
	 * checking for potential errors (and bail out to drop locks if there is
	 * a gpu reset pending so that i915_error_work_func can acquire them).
	 */

	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2510
	wake_up_all(&dev_priv->gpu_error.wait_queue);

	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
	wake_up_all(&dev_priv->pending_flip_queue);
}

2516
/**
2517
 * i915_reset_and_wakeup - do process context error handling work
2518
 * @dev_priv: i915 device private
2519 2520 2521 2522
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
2523
static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
2524
{
2525
	struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
2526 2527 2528
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2529

2530
	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
2531

2532 2533 2534
	DRM_DEBUG_DRIVER("resetting chip\n");
	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);

2535
	/*
	 * In most cases it's guaranteed that we get here with an RPM
	 * reference held, for example because there is a pending GPU
	 * request that won't finish until the reset is done. This
	 * isn't the case at least when we get here by doing a
	 * simulated reset via debugs, so get an RPM reference.
2541
	 */
2542 2543
	intel_runtime_pm_get(dev_priv);
	intel_prepare_reset(dev_priv);
2544

	do {
		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
		if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
			i915_reset(dev_priv);
			mutex_unlock(&dev_priv->drm.struct_mutex);
		}
2556

		/* We need to wait for anyone holding the lock to wakeup */
	} while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
				     I915_RESET_IN_PROGRESS,
				     TASK_UNINTERRUPTIBLE,
				     HZ));
2562

2563
	intel_finish_reset(dev_priv);
2564
	intel_runtime_pm_put(dev_priv);
2565

2566
	if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
2567 2568
		kobject_uevent_env(kobj,
				   KOBJ_CHANGE, reset_done_event);
2569

2570 2571 2572 2573 2574
	/*
	 * Note: The wake_up also serves as a memory barrier so that
	 * waiters see the updated value of the dev_priv->gpu_error.
	 */
	wake_up_all(&dev_priv->gpu_error.reset_queue);
2575 2576
}

2577 2578 2579 2580
static inline void
i915_err_print_instdone(struct drm_i915_private *dev_priv,
			struct intel_instdone *instdone)
{
2581 2582 2583
	int slice;
	int subslice;

	pr_err("  INSTDONE: 0x%08x\n", instdone->instdone);

	if (INTEL_GEN(dev_priv) <= 3)
		return;

	pr_err("  SC_INSTDONE: 0x%08x\n", instdone->slice_common);

	if (INTEL_GEN(dev_priv) <= 6)
		return;

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		pr_err("  SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
		       slice, subslice, instdone->sampler[slice][subslice]);

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		pr_err("  ROW_INSTDONE[%d][%d]: 0x%08x\n",
		       slice, subslice, instdone->row[slice][subslice]);
2601 2602
}

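/*
 * Clear the latched page-table and instruction-pointer error registers after
 * an error capture; EIR bits that refuse to clear are masked via EMR so they
 * do not keep retriggering.
 */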
static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
2604
{
2605
	u32 eir;
2606

2607 2608
	if (!IS_GEN2(dev_priv))
		I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
2609

2610 2611 2612 2613
	if (INTEL_GEN(dev_priv) < 4)
		I915_WRITE(IPEIR, I915_READ(IPEIR));
	else
		I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));
2614

2615
	I915_WRITE(EIR, I915_READ(EIR));
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
2622
		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
2623 2624 2625
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
2626 2627 2628
}

/**
2629
 * i915_handle_error - handle a gpu error
2630
 * @dev_priv: i915 device private
2631
 * @engine_mask: mask representing engines that are hung
2632
 * Do some basic checking of register state at error time and
2633 2634 2635 2636
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
2637
 * @fmt: Error message format string
2638
 */
2639 2640
void i915_handle_error(struct drm_i915_private *dev_priv,
		       u32 engine_mask,
2641
		       const char *fmt, ...)
2642
{
2643 2644
	va_list args;
	char error_msg[80];
2645

2646 2647 2648 2649
	va_start(args, fmt);
	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
	va_end(args);

2650
	i915_capture_error_state(dev_priv, engine_mask, error_msg);
2651
	i915_clear_error_registers(dev_priv);
2652

2653 2654
	if (!engine_mask)
		return;
2655

	if (test_and_set_bit(I915_RESET_IN_PROGRESS,
			     &dev_priv->gpu_error.flags))
		return;

	/*
	 * Wakeup waiting processes so that the reset function
	 * i915_reset_and_wakeup doesn't deadlock trying to grab
	 * various locks. By bumping the reset counter first, the woken
	 * processes will see a reset in progress and back off,
	 * releasing their locks and then wait for the reset completion.
	 * We must do this for _all_ gpu waiters that might hold locks
	 * that the reset work needs to acquire.
	 *
	 * Note: The wake_up also provides a memory barrier to ensure that the
	 * waiters see the updated value of the reset flags.
	 */
	i915_error_wake_up(dev_priv);
2673

2674
	i915_reset_and_wakeup(dev_priv);
2675 2676
}

2677 2678 2679
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
2680
static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
2681
{
2682
	struct drm_i915_private *dev_priv = to_i915(dev);
2683
	unsigned long irqflags;
2684

2685
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2686
	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2687
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2688

2689 2690 2691
	return 0;
}

2692
static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
2693
{
2694
	struct drm_i915_private *dev_priv = to_i915(dev);
2695 2696 2697
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2698 2699
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

2705
static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
2707
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;
2709
	uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
2710
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2713
	ilk_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

2719
static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2720
{
2721
	struct drm_i915_private *dev_priv = to_i915(dev);
2722 2723 2724
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2725
	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2726
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2727

2728 2729 2730
	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
2734
static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
2735
{
2736
	struct drm_i915_private *dev_priv = to_i915(dev);
2737
	unsigned long irqflags;
2738

2739
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2740
	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2741 2742 2743
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

2744
static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
2745
{
2746
	struct drm_i915_private *dev_priv = to_i915(dev);
2747 2748 2749
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2750 2751
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2752 2753 2754
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

2755
static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
2757
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;
2759
	uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
2760
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2763
	ilk_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

2767
static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
2768
{
2769
	struct drm_i915_private *dev_priv = to_i915(dev);
2770 2771 2772
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2773
	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2774 2775 2776
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

2777
static bool
2778
ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
2779
{
2780
	if (INTEL_GEN(engine->i915) >= 8) {
2781
		return (ipehr >> 23) == 0x1c;
	} else {
		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
				 MI_SEMAPHORE_REGISTER);
	}
}

2789
static struct intel_engine_cs *
2790 2791
semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
				 u64 offset)
2792
{
2793
	struct drm_i915_private *dev_priv = engine->i915;
2794
	struct intel_engine_cs *signaller;
2795
	enum intel_engine_id id;
2796

2797
	if (INTEL_GEN(dev_priv) >= 8) {
2798
		for_each_engine(signaller, dev_priv, id) {
2799
			if (engine == signaller)
2800 2801
				continue;

2802
			if (offset == signaller->semaphore.signal_ggtt[engine->hw_id])
2803 2804
				return signaller;
		}
2805 2806 2807
	} else {
		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;

2808
		for_each_engine(signaller, dev_priv, id) {
2809
			if (engine == signaller)
2810 2811
				continue;

2812
			if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
2813 2814 2815 2816
				return signaller;
		}
	}

2817 2818
	DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x, offset 0x%016llx\n",
			 engine->name, ipehr, offset);
2819

2820
	return ERR_PTR(-ENODEV);
2821 2822
}

2823
static struct intel_engine_cs *
2824
semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
2825
{
2826
	struct drm_i915_private *dev_priv = engine->i915;
2827
	void __iomem *vaddr;
2828
	u32 cmd, ipehr, head;
2829 2830
	u64 offset = 0;
	int i, backwards;
2831

	/*
	 * This function does not support execlist mode - any attempt to
	 * proceed further into this function will result in a kernel panic
	 * when dereferencing ring->buffer, which is not set up in execlist
	 * mode.
	 *
	 * The correct way of doing it would be to derive the currently
	 * executing ring buffer from the current context, which is derived
	 * from the currently running request. Unfortunately, to get the
	 * current request we would have to grab the struct_mutex before doing
	 * anything else, which would be ill-advised since some other thread
	 * might have grabbed it already and managed to hang itself, causing
	 * the hang checker to deadlock.
	 *
	 * Therefore, this function does not support execlist mode in its
	 * current form. Just return NULL and move on.
	 */
2849
	if (engine->buffer == NULL)
2850 2851
		return NULL;

2852
	ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
2853
	if (!ipehr_is_semaphore_wait(engine, ipehr))
2854
		return NULL;
2855

2856 2857 2858
	/*
	 * HEAD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX. But limit it to just 3
2859 2860
	 * or 4 dwords depending on the semaphore wait command size.
	 * Note that we don't care about ACTHD here since that might
2861 2862
	 * point at a batch, and semaphores are always emitted into the
	 * ringbuffer itself.
2863
	 */
2864
	head = I915_READ_HEAD(engine) & HEAD_ADDR;
2865
	backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
2866
	vaddr = (void __iomem *)engine->buffer->vaddr;
2867

2868
	for (i = backwards; i; --i) {
		/*
		 * Be paranoid and presume the hw has gone off into the wild -
		 * our ring is smaller than what the hardware (and hence
		 * HEAD_ADDR) allows. Also handles wrap-around.
		 */
2874
		head &= engine->buffer->size - 1;
2875 2876

		/* This here seems to blow up */
2877
		cmd = ioread32(vaddr + head);
2878 2879 2880
		if (cmd == ipehr)
			break;

2881 2882
		head -= 4;
	}
2883

2884 2885
	if (!i)
		return NULL;
2886

2887
	*seqno = ioread32(vaddr + head + 4) + 1;
2888
	if (INTEL_GEN(dev_priv) >= 8) {
2889
		offset = ioread32(vaddr + head + 12);
2890
		offset <<= 32;
2891
		offset |= ioread32(vaddr + head + 8);
2892
	}
2893
	return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
2894 2895
}

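/*
 * Returns 1 if the engine we are waiting on via semaphore has already passed
 * the awaited seqno, 0 if it still appears to be making progress, and -1 if
 * the wait cannot be resolved and is treated as a potential deadlock.
 */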
static int semaphore_passed(struct intel_engine_cs *engine)
2897
{
2898
	struct drm_i915_private *dev_priv = engine->i915;
2899
	struct intel_engine_cs *signaller;
2900
	u32 seqno;
2901

2902
	engine->hangcheck.deadlock++;
2903

2904
	signaller = semaphore_waits_for(engine, &seqno);
2905 2906 2907
	if (signaller == NULL)
		return -1;

2908 2909 2910
	if (IS_ERR(signaller))
		return 0;

2911
	/* Prevent pathological recursion due to driver bugs */
2912
	if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
2913 2914
		return -1;

2915
	if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
2916 2917
		return 1;

2918 2919 2920
	/* cursory check for an unkickable deadlock */
	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
	    semaphore_passed(signaller) < 0)
2921 2922 2923
		return -1;

	return 0;
2924 2925 2926 2927
}

static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
2928
	struct intel_engine_cs *engine;
2929
	enum intel_engine_id id;
2930

2931
	for_each_engine(engine, dev_priv, id)
2932
		engine->hangcheck.deadlock = 0;
2933 2934
}

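/*
 * Accumulate INSTDONE bits between hangcheck samples; a subunit only counts
 * as stuck once no new "done" transitions have been observed.
 */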
static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone)
{
	u32 tmp = current_instdone | *old_instdone;
	bool unchanged;

	unchanged = tmp == *old_instdone;
	*old_instdone |= tmp;

	return unchanged;
}

2946
static bool subunits_stuck(struct intel_engine_cs *engine)
2947
{
2948 2949 2950
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_instdone instdone;
	struct intel_instdone *accu_instdone = &engine->hangcheck.instdone;
2951
	bool stuck;
2952 2953
	int slice;
	int subslice;
2954

2955
	if (engine->id != RCS)
2956 2957
		return true;

2958
	intel_engine_get_instdone(engine, &instdone);
2959

2960 2961 2962 2963 2964
	/* There might be unstable subunit states even when
	 * actual head is not moving. Filter out the unstable ones by
	 * accumulating the undone -> done transitions and only
	 * consider those as progress.
	 */
2965 2966 2967 2968
	stuck = instdone_unchanged(instdone.instdone,
				   &accu_instdone->instdone);
	stuck &= instdone_unchanged(instdone.slice_common,
				    &accu_instdone->slice_common);

	for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
		stuck &= instdone_unchanged(instdone.sampler[slice][subslice],
					    &accu_instdone->sampler[slice][subslice]);
		stuck &= instdone_unchanged(instdone.row[slice][subslice],
					    &accu_instdone->row[slice][subslice]);
	}
2976 2977 2978 2979

	return stuck;
}

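/*
 * Classify an engine whose seqno has not advanced: it is still ACTIVE if
 * ACTHD or the subunit INSTDONE state is moving, otherwise it is HUNG.
 */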
static enum intel_engine_hangcheck_action
2981
head_stuck(struct intel_engine_cs *engine, u64 acthd)
2982
{
2983
	if (acthd != engine->hangcheck.acthd) {
2984 2985

		/* Clear subunit states on head movement */
2986
		memset(&engine->hangcheck.instdone, 0,
2987
		       sizeof(engine->hangcheck.instdone));
2988

2989
		return HANGCHECK_ACTIVE;
2990
	}
2991

2992
	if (!subunits_stuck(engine))
2993 2994 2995 2996 2997
		return HANGCHECK_ACTIVE;

	return HANGCHECK_HUNG;
}

2998 2999
static enum intel_engine_hangcheck_action
engine_stuck(struct intel_engine_cs *engine, u64 acthd)
3000
{
3001
	struct drm_i915_private *dev_priv = engine->i915;
3002
	enum intel_engine_hangcheck_action ha;
3003 3004
	u32 tmp;

3005
	ha = head_stuck(engine, acthd);
3006 3007 3008
	if (ha != HANGCHECK_HUNG)
		return ha;

3009
	if (IS_GEN2(dev_priv))
3010
		return HANGCHECK_HUNG;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
3017
	tmp = I915_READ_CTL(engine);
3018
	if (tmp & RING_WAIT) {
3019
		i915_handle_error(dev_priv, 0,
3020
				  "Kicking stuck wait on %s",
3021 3022
				  engine->name);
		I915_WRITE_CTL(engine, tmp);
3023
		return HANGCHECK_KICK;
3024 3025
	}

3026
	if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
3027
		switch (semaphore_passed(engine)) {
3028
		default:
3029
			return HANGCHECK_HUNG;
3030
		case 1:
3031
			i915_handle_error(dev_priv, 0,
3032
					  "Kicking stuck semaphore on %s",
3033 3034
					  engine->name);
			I915_WRITE_CTL(engine, tmp);
3035
			return HANGCHECK_KICK;
3036
		case 0:
3037
			return HANGCHECK_WAIT;
3038
		}
3039
	}
3040

3041
	return HANGCHECK_HUNG;
3042 3043
}

3044
/*
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track of per-ring seqno progress and
 * if there is no progress, the hangcheck score for that ring is increased.
 * Further, acthd is inspected to see if the ring is stuck. If it is, we kick
 * the ring. If we see no progress on three subsequent calls we assume the
 * chip is wedged and try to fix it by resetting the chip.
 */
3052
static void i915_hangcheck_elapsed(struct work_struct *work)
{
3054 3055 3056
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     gpu_error.hangcheck_work.work);
3057
	struct intel_engine_cs *engine;
3058
	enum intel_engine_id id;
3059 3060
	unsigned int hung = 0, stuck = 0;
	int busy_count = 0;
3061 3062 3063
#define BUSY 1
#define KICK 5
#define HUNG 20
3064
#define ACTIVE_DECAY 15
3065

3066
	if (!i915.enable_hangcheck)
3067 3068
		return;

3069
	if (!READ_ONCE(dev_priv->gt.awake))
3070
		return;
3071

	/* As enabling the GPU requires fairly extensive mmio access,
	 * periodically arm the mmio checker to see if we are triggering
	 * any invalid access.
	 */
	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);

3078
	for_each_engine(engine, dev_priv, id) {
3079
		bool busy = intel_engine_has_waiter(engine);
3080 3081
		u64 acthd;
		u32 seqno;
3082
		u32 submit;
3083

3084 3085
		semaphore_clear_deadlocks(dev_priv);

		/* We don't strictly need an irq-barrier here, as we are not
		 * serving an interrupt request, be paranoid in case the
		 * barrier has side-effects (such as preventing a broken
		 * cacheline snoop) and so be sure that we can see the seqno
		 * advance. If the seqno should stick, due to a stale
		 * cacheline, we would erroneously declare the GPU hung.
		 */
		if (engine->irq_seqno_barrier)
			engine->irq_seqno_barrier(engine);

3096
		acthd = intel_engine_get_active_head(engine);
3097
		seqno = intel_engine_get_seqno(engine);
3098
		submit = READ_ONCE(engine->last_submitted_seqno);
3099

3100
		if (engine->hangcheck.seqno == seqno) {
3101
			if (i915_seqno_passed(seqno, submit)) {
3102
				engine->hangcheck.action = HANGCHECK_IDLE;
3103
			} else {
3104
				/* We always increment the hangcheck score
3105
				 * if the engine is busy and still processing
3106 3107 3108 3109
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring, if this
3110 3111
				 * engine is in a legitimate wait for another
				 * engine. In that case the waiting engine is a
				 * victim and we want to be sure we catch the
				 * right culprit. Then every time we do kick
				 * the ring, add a small increment to the
				 * score so that we can catch a batch that is
				 * being repeatedly kicked and so responsible
				 * for stalling the machine.
				 */
3119 3120
				engine->hangcheck.action =
					engine_stuck(engine, acthd);
3121

3122
				switch (engine->hangcheck.action) {
3123
				case HANGCHECK_IDLE:
3124
				case HANGCHECK_WAIT:
3125
					break;
3126
				case HANGCHECK_ACTIVE:
3127
					engine->hangcheck.score += BUSY;
3128
					break;
3129
				case HANGCHECK_KICK:
3130
					engine->hangcheck.score += KICK;
3131
					break;
3132
				case HANGCHECK_HUNG:
3133
					engine->hangcheck.score += HUNG;
3134 3135
					break;
				}
3136
			}

			if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
				hung |= intel_engine_flag(engine);
				if (engine->hangcheck.action != HANGCHECK_HUNG)
					stuck |= intel_engine_flag(engine);
			}
3143
		} else {
3144
			engine->hangcheck.action = HANGCHECK_ACTIVE;
3145

3146 3147 3148
			/* Gradually reduce the count so that we catch DoS
			 * attempts across multiple batches.
			 */
3149 3150 3151 3152
			if (engine->hangcheck.score > 0)
				engine->hangcheck.score -= ACTIVE_DECAY;
			if (engine->hangcheck.score < 0)
				engine->hangcheck.score = 0;
3153

3154
			/* Clear head and subunit states on seqno movement */
3155
			acthd = 0;
3156

3157
			memset(&engine->hangcheck.instdone, 0,
3158
			       sizeof(engine->hangcheck.instdone));
3159 3160
		}

3161 3162
		engine->hangcheck.seqno = seqno;
		engine->hangcheck.acthd = acthd;
3163
		busy_count += busy;
3164
	}
3165

3166 3167
	if (hung) {
		char msg[80];
3168
		unsigned int tmp;
3169
		int len;
3170

		/* If some rings hung but others were still busy, only
		 * blame the hanging rings in the synopsis.
		 */
		if (stuck != hung)
			hung &= ~stuck;
		len = scnprintf(msg, sizeof(msg),
				"%s on ", stuck == hung ? "No progress" : "Hang");
3178
		for_each_engine_masked(engine, dev_priv, hung, tmp)
			len += scnprintf(msg + len, sizeof(msg) - len,
					 "%s, ", engine->name);
		msg[len-2] = '\0';

		return i915_handle_error(dev_priv, hung, msg);
	}

3186
	/* Reset timer in case GPU hangs without another request being added */
3187
	if (busy_count)
3188
		i915_queue_hangcheck(dev_priv);
3189 3190
}

3191
static void ibx_irq_reset(struct drm_device *dev)
{
3193
	struct drm_i915_private *dev_priv = to_i915(dev);

3195
	if (HAS_PCH_NOP(dev_priv))
		return;

3198
	GEN5_IRQ_RESET(SDE);
3199

3200
	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3201
		I915_WRITE(SERR_INT, 0xffffffff);
}
3203

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * This function needs to be called before interrupts are enabled.
 */
static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
3214
	struct drm_i915_private *dev_priv = to_i915(dev);

3216
	if (HAS_PCH_NOP(dev_priv))
		return;

	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}

3224
static void gen5_gt_irq_reset(struct drm_device *dev)
3225
{
3226
	struct drm_i915_private *dev_priv = to_i915(dev);
3227

3228
	GEN5_IRQ_RESET(GT);
	if (INTEL_INFO(dev)->gen >= 6)
3230
		GEN5_IRQ_RESET(GEN6_PM);
3231 3232
}

3233 3234 3235 3236
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	if (IS_CHERRYVIEW(dev_priv))
		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);

3242
	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
3243 3244
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(PIPESTAT(pipe),
			   PIPE_FIFO_UNDERRUN_STATUS |
			   PIPESTAT_INT_STATUS_MASK);
		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
3251 3252

	GEN5_IRQ_RESET(VLV_);
3253
	dev_priv->irq_mask = ~0;
3254 3255
}

3256 3257 3258
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
3259
	u32 enable_mask;
	enum pipe pipe;

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

3269 3270 3271
	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3272
	if (IS_CHERRYVIEW(dev_priv))
3273
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3274 3275 3276

	WARN_ON(dev_priv->irq_mask != ~0);

3277 3278 3279
	dev_priv->irq_mask = ~enable_mask;

	GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
}

/* drm_dma.h hooks
*/
static void ironlake_irq_reset(struct drm_device *dev)
{
3286
	struct drm_i915_private *dev_priv = to_i915(dev);
3287 3288 3289 3290

	I915_WRITE(HWSTAM, 0xffffffff);

	GEN5_IRQ_RESET(DE);
3291
	if (IS_GEN7(dev_priv))
		I915_WRITE(GEN7_ERR_INT, 0xffffffff);

	gen5_gt_irq_reset(dev);

	ibx_irq_reset(dev);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
3301
	struct drm_i915_private *dev_priv = to_i915(dev);

3303 3304 3305
	I915_WRITE(VLV_MASTER_IER, 0);
	POSTING_READ(VLV_MASTER_IER);

3306
	gen5_gt_irq_reset(dev);

3308
	spin_lock_irq(&dev_priv->irq_lock);
3309 3310
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
3311
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	GEN8_IRQ_RESET_NDX(GT, 0);
	GEN8_IRQ_RESET_NDX(GT, 1);
	GEN8_IRQ_RESET_NDX(GT, 2);
	GEN8_IRQ_RESET_NDX(GT, 3);
}

static void gen8_irq_reset(struct drm_device *dev)
3323
{
3324
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

3330
	gen8_gt_irq_reset(dev_priv);
3331

3332
	for_each_pipe(dev_priv, pipe)
3333 3334
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
3335
			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3336

3337 3338 3339
	GEN5_IRQ_RESET(GEN8_DE_PORT_);
	GEN5_IRQ_RESET(GEN8_DE_MISC_);
	GEN5_IRQ_RESET(GEN8_PCU_);
3340

3341
	if (HAS_PCH_SPLIT(dev_priv))
3342
		ibx_irq_reset(dev);
3343
}
3344

3345 3346
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     unsigned int pipe_mask)
3347
{
3348
	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3349
	enum pipe pipe;
3350

3351
	spin_lock_irq(&dev_priv->irq_lock);
3352 3353 3354 3355
	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
				  dev_priv->de_irq_mask[pipe],
				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
3356
	spin_unlock_irq(&dev_priv->irq_lock);
3357 3358
}

3359 3360 3361
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
				     unsigned int pipe_mask)
{
3362 3363
	enum pipe pipe;

3364
	spin_lock_irq(&dev_priv->irq_lock);
3365 3366
	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3367 3368 3369
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
3370
	synchronize_irq(dev_priv->drm.irq);
3371 3372
}

3373 3374
static void cherryview_irq_preinstall(struct drm_device *dev)
{
3375
	struct drm_i915_private *dev_priv = to_i915(dev);
3376 3377 3378 3379

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

3380
	gen8_gt_irq_reset(dev_priv);
3381 3382 3383

	GEN5_IRQ_RESET(GEN8_PCU_);

3384
	spin_lock_irq(&dev_priv->irq_lock);
3385 3386
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
3387
	spin_unlock_irq(&dev_priv->irq_lock);
3388 3389
}

3390
static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;

3396
	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}

3403
static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3404
{
3405
	u32 hotplug_irqs, hotplug, enabled_irqs;
3406

3407
	if (HAS_PCH_IBX(dev_priv)) {
3408
		hotplug_irqs = SDE_HOTPLUG_MASK;
3409
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
3410
	} else {
3411
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3412
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
3413
	}
3414

3415
	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3416 3417 3418

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
3419 3420
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on LPT+.
3421
	 */
3422 3423 3424 3425 3426
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3427 3428 3429 3430
	/*
	 * When CPU and PCH are on the same package, port A
	 * HPD must be enabled in both north and south.
	 */
3431
	if (HAS_PCH_LPT_LP(dev_priv))
3432
		hotplug |= PORTA_HOTPLUG_ENABLE;
3433
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3434
}
X
static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, hotplug, enabled_irqs;

	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/* Enable digital hotplug on the PCH */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
		PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);

	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
	hotplug |= PORTE_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}

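/*
 * Enable the CPU (north) side port A digital hotplug interrupt for
 * ILK/IVB/BDW style hardware and then chain into the PCH setup.
 */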
static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, hotplug, enabled_irqs;

	if (INTEL_GEN(dev_priv) >= 8) {
		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);

		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else if (INTEL_GEN(dev_priv) >= 7) {
		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else {
		hotplug_irqs = DE_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	}

	/*
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 * The pulse duration bits are reserved on HSW+.
	 */
	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);

	ibx_hpd_irq_setup(dev_priv);
}

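/*
 * Enable the DE port hotplug interrupts on Broxton and program the per-DDI
 * hotplug enable and HPD invert bits, the latter based on the VBT.
 */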
static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, hotplug, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;

	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
		PORTA_HOTPLUG_ENABLE;

	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
		      hotplug, enabled_irqs);
	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;

	/*
	 * For BXT invert bit has to be set based on AOB design
	 * for HPD detection logic, update it based on VBT fields.
	 */

	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
		hotplug |= BXT_DDIA_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
		hotplug |= BXT_DDIB_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
		hotplug |= BXT_DDIC_HPD_INVERT;

	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void ibx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 mask;

	if (HAS_PCH_NOP(dev_priv))
		return;

	if (HAS_PCH_IBX(dev_priv))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;

	gen5_assert_iir_is_zero(dev_priv, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);
}

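/*
 * Program the GT and PM interrupt registers for pre-gen8 platforms: render,
 * BSD and BLT user interrupts, L3 parity where present, and the VEBOX user
 * interrupt on hardware that has it. RPS interrupts stay masked here and are
 * enabled on demand by the RPS code.
 */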
static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev_priv)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
		gt_irqs |= GT_PARITY_ERROR(dev_priv);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev_priv)) {
		gt_irqs |= ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);

	if (INTEL_INFO(dev)->gen >= 6) {
		/*
		 * RPS interrupts will get enabled/disabled on demand when RPS
		 * itself is enabled/disabled.
		 */
		if (HAS_VEBOX(dev_priv)) {
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
			dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
		}

		dev_priv->pm_imr = 0xffffffff;
		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
	}
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 display_mask, extra_mask;

	if (INTEL_INFO(dev)->gen >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
				DE_PLANEB_FLIP_DONE_IVB |
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A |
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
				DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_DP_A_HOTPLUG);
	}

	dev_priv->irq_mask = ~display_mask;

	I915_WRITE(HWSTAM, 0xeffe);

	ibx_irq_pre_postinstall(dev);

	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);

	gen5_gt_irq_postinstall(dev);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev_priv)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irq(&dev_priv->irq_lock);
		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}

	return 0;
}

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv)) {
		vlv_display_irq_reset(dev_priv);
		vlv_display_irq_postinstall(dev_priv);
	}
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		vlv_display_irq_reset(dev_priv);
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen5_gt_irq_postinstall(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(VLV_MASTER_IER);

	return 0;
}

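/*
 * Program the four gen8 GT interrupt register banks: user and context switch
 * interrupts for the render/blitter, video and video enhancement engines,
 * plus L3 parity where present. The PM bank is left for the RPS code.
 */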
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
		};

	if (HAS_L3_DPF(dev_priv))
		gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	dev_priv->pm_ier = 0x0;
	dev_priv->pm_imr = ~dev_priv->pm_ier;
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled.
	 */
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

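/*
 * Program the gen8+ display engine interrupts: per-pipe flip done, fault and
 * CDCLK CRC bits, the AUX channel and hotplug bits in the DE port register,
 * and the GSE event in DE misc. Pipes whose power well is currently down are
 * skipped and get set up later by gen8_irq_power_well_post_enable().
 */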
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	uint32_t de_pipe_enables;
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_MISC_GSE;
	enum pipe pipe;

	if (INTEL_INFO(dev_priv)->gen >= 9) {
		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
				  GEN9_AUX_CHANNEL_D;
		if (IS_BROXTON(dev_priv))
			de_port_masked |= BXT_DE_PORT_GMBUS;
	} else {
		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	}

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

	de_port_enables = de_port_masked;
	if (IS_BROXTON(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;

	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);

	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
}

static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen8_gt_irq_postinstall(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static void gen8_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!dev_priv)
		return;

	gen8_irq_reset(dev);
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!dev_priv)
		return;

	I915_WRITE(VLV_MASTER_IER, 0);
	POSTING_READ(VLV_MASTER_IER);

	gen5_gt_irq_reset(dev);

	I915_WRITE(HWSTAM, 0xffffffff);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void cherryview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!dev_priv)
		return;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!dev_priv)
		return;

	ironlake_irq_reset(dev);
}

static void i8xx_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv,
			       int plane, int pipe, u32 iir)
{
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev_priv, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		goto check_page_flip;

	intel_finish_page_flip_cs(dev_priv, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev_priv, pipe);
	return false;
}

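/*
 * Gen2 interrupt handler: works on the 16bit IIR/IMR/IER registers and two
 * pipes, clearing PIPESTAT before IIR so that no events are lost.
 */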
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u16 iir, new_iir;
	u32 pipe_stats[2];
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	irqreturn_t ret;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	ret = IRQ_NONE;
	iir = I915_READ16(IIR);
	if (iir == 0)
		goto out;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff)
				I915_WRITE(reg, pipe_stats[pipe]);
		}
		spin_unlock(&dev_priv->irq_lock);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev_priv))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i8xx_handle_vblank(dev_priv, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev_priv, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		iir = new_iir;
	}
	ret = IRQ_HANDLED;

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i8xx_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

static void i915_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev_priv);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_i915_private *dev_priv,
			       int plane, int pipe, u32 iir)
{
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev_priv, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		goto check_page_flip;

	intel_finish_page_flip_cs(dev_priv, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev_priv, pipe);
	return false;
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
			if (hotplug_status)
				i9xx_hpd_irq_handler(dev_priv, hotplug_status);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev_priv))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev_priv, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev_priv, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev_priv);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i915_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev_priv);

	return 0;
}

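/*
 * Program PORT_HOTPLUG_EN for the gen3/4 style hotplug logic; the caller
 * holds irq_lock, which i915_hotplug_interrupt_update_locked() relies on.
 */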
static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/* Programming the CRT detection parameters tends
	   to generate a spurious hotplug event about three
	   seconds later.  So just do it once.
	*/
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
			if (hotplug_status)
				i9xx_hpd_irq_handler(dev_priv, hotplug_status);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev_priv->engine[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev_priv, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev_priv, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev_priv);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev_priv);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	if (!dev_priv)
		return;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not setup the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	dev_priv->rps.pm_intr_keep = 0;

	/*
	 * SNB,IVB can hard hang, and VLV,CHV may hard hang, on a looping
	 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
		dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_GUC;

	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
			  i915_hangcheck_elapsed);

	if (IS_GEN2(dev_priv)) {
		/* Gen2 doesn't have a hardware frame counter */
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = drm_vblank_no_hw_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = i965_enable_vblank;
		dev->driver->disable_vblank = i965_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = i965_enable_vblank;
		dev->driver->disable_vblank = i965_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (IS_BROXTON(dev_priv))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else {
		if (IS_GEN2(dev_priv)) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
			dev->driver->enable_vblank = i8xx_enable_vblank;
			dev->driver->disable_vblank = i8xx_disable_vblank;
		} else if (IS_GEN3(dev_priv)) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev->driver->enable_vblank = i8xx_enable_vblank;
			dev->driver->disable_vblank = i8xx_disable_vblank;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev->driver->enable_vblank = i965_enable_vblank;
			dev->driver->disable_vblank = i965_disable_vblank;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	}
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(&dev_priv->drm);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
	dev_priv->pm.irqs_enabled = false;
	synchronize_irq(dev_priv->drm.irq);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
	dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
}