/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN3_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

#define GEN2_IRQ_RESET(type) do { \
	I915_WRITE16(type##IMR, 0xffff); \
	POSTING_READ16(type##IMR); \
	I915_WRITE16(type##IER, 0); \
	I915_WRITE16(type##IIR, 0xffff); \
	POSTING_READ16(type##IIR); \
	I915_WRITE16(type##IIR, 0xffff); \
	POSTING_READ16(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u16 val = I915_READ16(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE16(reg, 0xffff);
	POSTING_READ16(reg);
	I915_WRITE16(reg, 0xffff);
	POSTING_READ16(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \
	gen3_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

#define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \
	gen2_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE16(type##IER, (ier_val)); \
	I915_WRITE16(type##IMR, (imr_val)); \
	POSTING_READ16(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid that read-modify-write cycles
 * interfere, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    uint32_t interrupt_mask,
			    uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
	POSTING_READ_FW(GTIMR);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

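/*
 * On gen8+ the PM interrupts live in GT interrupt register bank 2;
 * these helpers pick the right IIR/IMR/IER register for the platform.
 */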
static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	new_val = dev_priv->pm_imr;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_imr) {
		dev_priv->pm_imr = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

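/* like gen6_mask_pm_irq(), minus the intel_irqs_enabled() sanity check */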
static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_mask_pm_irq(dev_priv, mask);
}

static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	lockdep_assert_held(&dev_priv->irq_lock);

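	/* IIR can queue up two events, so clear it twice (see the "be paranoid" note above) */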
	I915_WRITE(reg, reset_mask);
	I915_WRITE(reg, reset_mask);
	POSTING_READ(reg);
}

static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier |= enable_mask;
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	gen6_unmask_pm_irq(dev_priv, enable_mask);
	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}

static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier &= ~disable_mask;
	__gen6_mask_pm_irq(dev_priv, disable_mask);
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	/* though a barrier is missing here, we don't really need one */
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	if (READ_ONCE(dev_priv->rps.interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON_ONCE(dev_priv->rps.pm_iir);
	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	if (!READ_ONCE(dev_priv->rps.interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	/* Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&dev_priv->rps.work);
	gen6_reset_rps_interrupts(dev_priv);
}

void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	if (!dev_priv->guc.interrupts_enabled) {
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
				       dev_priv->pm_guc_events);
		dev_priv->guc.interrupts_enabled = true;
		gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
	}
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->guc.interrupts_enabled = false;

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	gen9_reset_guc_interrupts(dev_priv);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (INTEL_GEN(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		  status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		  pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode;
	unsigned long irqflags;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ_FW(low_frame);
		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return -1;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev_priv))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank.  So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem.  We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				     bool in_vblank_irq, int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
								pipe);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev_priv, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);
}

static void notify_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *rq = NULL;
	struct intel_wait *wait;

	atomic_inc(&engine->irq_count);
	set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	spin_lock(&engine->breadcrumbs.irq_lock);
	wait = engine->breadcrumbs.irq_wait;
	if (wait) {
		bool wakeup = engine->irq_seqno_barrier;

		/* We use a callback from the dma-fence to submit
		 * requests after waiting on our own requests. To
		 * ensure minimum delay in queuing the next request to
		 * hardware, signal the fence now rather than wait for
		 * the signaler to be woken up. We still wake up the
		 * waiter in order to handle the irq-seqno coherency
		 * issues (we may receive the interrupt before the
		 * seqno is written, see __i915_request_irq_complete())
		 * and to handle coalescing of multiple seqno updates
		 * and many waiters.
		 */
		if (i915_seqno_passed(intel_engine_get_seqno(engine),
				      wait->seqno)) {
			struct drm_i915_gem_request *waiter = wait->request;

			wakeup = true;
			if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				      &waiter->fence.flags) &&
			    intel_wait_check_request(wait, waiter))
				rq = i915_gem_request_get(waiter);
		}

		if (wakeup)
			wake_up_process(wait->tsk);
	} else {
		__intel_engine_disarm_breadcrumbs(engine);
	}
	spin_unlock(&engine->breadcrumbs.irq_lock);

	if (rq) {
		dma_fence_signal(&rq->fence);
		i915_gem_request_put(rq);
	}

	trace_intel_engine_notify(engine, wait);
}

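/* Snapshot the render/media C0 residency counters along with a raw timestamp */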
static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->ktime = ktime_get_raw();
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	const struct intel_rps_ei *prev = &dev_priv->rps.ei;
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);

	if (prev->ktime) {
		u64 time, c0;
		u32 render, media;

		time = ktime_us_delta(now.ktime, prev->ktime);

		time *= dev_priv->czclk_freq;

		/* Workload can be split between render + media,
		 * e.g. SwapBuffers being blitted in X after being rendered in
		 * mesa. To account for this we need to combine both engines
		 * into our activity counter.
		 */
		render = now.render_c0 - prev->render_c0;
		media = now.media_c0 - prev->media_c0;
		c0 = max(render, media);
		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */

		if (c0 > time * dev_priv->rps.up_threshold)
			events = GEN6_PM_RP_UP_THRESHOLD;
		else if (c0 < time * dev_priv->rps.down_threshold)
			events = GEN6_PM_RP_DOWN_THRESHOLD;
	}

	dev_priv->rps.ei = now;
	return events;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	bool client_boost = false;
	int new_delay, adj, min, max;
	u32 pm_iir = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->rps.interrupts_enabled) {
		pm_iir = fetch_and_zero(&dev_priv->rps.pm_iir);
		client_boost = atomic_read(&dev_priv->rps.num_waiters);
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		goto out;

	mutex_lock(&dev_priv->rps.hw_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = dev_priv->rps.last_adj;
	new_delay = dev_priv->rps.cur_freq;
	min = dev_priv->rps.min_freq_softlimit;
	max = dev_priv->rps.max_freq_softlimit;
	if (client_boost)
		max = dev_priv->rps.max_freq;
	if (client_boost && new_delay < dev_priv->rps.boost_freq) {
		new_delay = dev_priv->rps.boost_freq;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;

		if (new_delay >= dev_priv->rps.max_freq_softlimit)
			adj = 0;
	} else if (client_boost) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;

		if (new_delay <= dev_priv->rps.min_freq_softlimit)
			adj = 0;
	} else { /* unknown event */
		adj = 0;
	}

	dev_priv->rps.last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	if (intel_set_rps(dev_priv, new_delay)) {
		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
		dev_priv->rps.last_adj = 0;
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

out:
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->rps.interrupts_enabled)
		gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
					       u32 iir)
{
	if (!HAS_L3_DPF(dev_priv))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev_priv);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(dev_priv->engine[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev_priv->engine[VCS]);
}

static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(dev_priv->engine[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev_priv->engine[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev_priv->engine[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev_priv))
		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
}

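/*
 * A single GT IIR dword carries several engines' interrupt bits;
 * test_shift selects the field belonging to this engine.
 */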
static void
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	bool tasklet = false;

	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) {
		if (port_count(&execlists->port[0])) {
			__set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
			tasklet = true;
		}
	}

	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) {
		notify_ring(engine);
		tasklet |= i915_modparams.enable_guc_submission;
	}

	if (tasklet)
		tasklet_hi_schedule(&execlists->irq_tasklet);
}

static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
				   u32 master_ctl,
				   u32 gt_iir[4])
{
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
		if (gt_iir[0]) {
			I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
		if (gt_iir[1]) {
			I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
		if (gt_iir[3]) {
			I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
		if (gt_iir[2] & (dev_priv->pm_rps_events |
				 dev_priv->pm_guc_events)) {
			I915_WRITE_FW(GEN8_GT_IIR(2),
				      gt_iir[2] & (dev_priv->pm_rps_events |
						   dev_priv->pm_guc_events));
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	return ret;
}

static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
				u32 gt_iir[4])
{
	if (gt_iir[0]) {
		gen8_cs_irq_handler(dev_priv->engine[RCS],
				    gt_iir[0], GEN8_RCS_IRQ_SHIFT);
		gen8_cs_irq_handler(dev_priv->engine[BCS],
				    gt_iir[0], GEN8_BCS_IRQ_SHIFT);
	}

	if (gt_iir[1]) {
		gen8_cs_irq_handler(dev_priv->engine[VCS],
				    gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
		gen8_cs_irq_handler(dev_priv->engine[VCS2],
				    gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
	}

	if (gt_iir[3])
		gen8_cs_irq_handler(dev_priv->engine[VECS],
				    gt_iir[3], GEN8_VECS_IRQ_SHIFT);

	if (gt_iir[2] & dev_priv->pm_rps_events)
		gen6_rps_irq_handler(dev_priv, gt_iir[2]);

	if (gt_iir[2] & dev_priv->pm_guc_events)
		gen9_guc_irq_handler(dev_priv, gt_iir[2]);
}

static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum port port, u32 val))
{
	enum port port;
	int i;

	for_each_hpd_pin(i) {
		if ((hpd[i] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(i);

		port = intel_hpd_pin_to_port(i);
		if (port == PORT_NONE)
			continue;

		if (long_pulse_detect(port, dig_hotplug_reg))
			*long_mask |= BIT(i);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
}

static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct drm_driver *driver = dev_priv->drm.driver;
	uint32_t crcs[5];
	int head, tail;

	spin_lock(&pipe_crc->lock);
	if (pipe_crc->source) {
		if (!pipe_crc->entries) {
			spin_unlock(&pipe_crc->lock);
			DRM_DEBUG_KMS("spurious interrupt\n");
			return;
		}

		head = pipe_crc->head;
		tail = pipe_crc->tail;

		if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
			spin_unlock(&pipe_crc->lock);
			DRM_ERROR("CRC buffer overflowing\n");
			return;
		}

		entry = &pipe_crc->entries[head];

		entry->frame = driver->get_vblank_counter(&dev_priv->drm, pipe);
		entry->crc[0] = crc0;
		entry->crc[1] = crc1;
		entry->crc[2] = crc2;
		entry->crc[3] = crc3;
		entry->crc[4] = crc4;

		head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
		pipe_crc->head = head;

		spin_unlock(&pipe_crc->lock);

		wake_up_interruptible(&pipe_crc->wq);
	} else {
		/*
		 * For some not yet identified reason, the first CRC is
		 * bonkers. So let's just wait for the next vblank and read
		 * out the buggy result.
		 *
		 * On CHV sometimes the second CRC is bonkers as well, so
		 * don't trust that one either.
		 */
		if (pipe_crc->skipped == 0 ||
		    (IS_CHERRYVIEW(dev_priv) && pipe_crc->skipped == 1)) {
			pipe_crc->skipped++;
			spin_unlock(&pipe_crc->lock);
			return;
		}
		spin_unlock(&pipe_crc->lock);
		crcs[0] = crc0;
		crcs[1] = crc1;
		crcs[2] = crc2;
		crcs[3] = crc3;
		crcs[4] = crc4;
		drm_crtc_add_crc_entry(&crtc->base, true,
				       drm_crtc_accurate_vblank_count(&crtc->base),
				       crcs);
	}
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif


static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	uint32_t res1, res2;

	if (INTEL_GEN(dev_priv) >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			schedule_work(&dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_GEN(dev_priv) >= 8)
		return;

	if (HAS_VEBOX(dev_priv)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->engine[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
{
	if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) {
		/* Sample the log buffer flush related bits & clear them out now
		 * itself from the message identity register to minimize the
		 * probability of losing a flush interrupt, when there are back
		 * to back flush interrupts.
		 * There can be a new flush interrupt, for different log buffer
		 * type (like for ISR), whilst Host is handling one (for DPC).
		 * Since same bit is used in message register for ISR & DPC, it
		 * could happen that GuC sets the bit for 2nd interrupt but Host
		 * clears out the bit on handling the 1st interrupt.
		 */
		u32 msg, flush;

		msg = I915_READ(SOFT_SCRATCH(15));
		flush = msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED |
			       INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER);
		if (flush) {
			/* Clear the message bits that are handled */
			I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);

			/* Handle flush interrupt in bottom half */
			queue_work(dev_priv->guc.log.runtime.flush_wq,
				   &dev_priv->guc.log.runtime.flush_work);

			dev_priv->guc.log.flush_interrupt_count++;
		} else {
			/* Leaving the unhandled event bits uncleared won't
			 * cause the interrupt to re-trigger.
			 */
		}
	}
}

static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(PIPESTAT(pipe),
			   PIPESTAT_INT_STATUS_MASK |
			   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}
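
/*
 * PIPESTAT layout note (as assumed by the helper below): the high half
 * of each PIPESTAT register holds the interrupt enable bits and the
 * low half the sticky status bits. Status bits are acked by writing
 * them back as ones, and the write must carry the enable bits along
 * unchanged - hence the "enable_mask | pipe_stats[pipe]" in
 * i9xx_pipestat_irq_ack().
 */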

static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	int pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			status_mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(pipe);
		pipe_stats[pipe] = I915_READ(reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe])
			I915_WRITE(reg, enable_mask | pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);
}

static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}
}

static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);
}

static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}
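
/*
 * The three legacy pipestat handlers above differ mainly in which
 * vblank status bit applies and in the extra events (backlight/ASLE,
 * GMBUS) the platform generates; the VLV/CHV variant below mirrors the
 * i965 one minus the backlight event.
 */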

static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (hotplug_status)
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);

	return hotplug_status;
}

static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev_priv);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}
	}
}
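
/*
 * The interrupt handlers below use do { } while (0) purely so that
 * "break" can serve as an early out once every IIR source reads back
 * zero; the body runs once per hardware interrupt.
 */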

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);
		iir = I915_READ(VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		I915_WRITE(VLV_MASTER_IER, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		I915_WRITE(VLV_IER, ier);
		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
		POSTING_READ(VLV_MASTER_IER);

		if (gt_iir)
			snb_gt_irq_handler(dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 master_ctl, iir;
		u32 gt_iir[4] = {};
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		I915_WRITE(GEN8_MASTER_IRQ, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		I915_WRITE(VLV_IER, ier);
		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);

		gen8_gt_irq_handler(dev_priv, gt_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}
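
/*
 * In the hotplug handlers below, intel_get_hpd_pins() converts the raw
 * trigger bits into a pin mask plus, via the platform *_long_detect()
 * callback, a mask of pins that saw a long (plug/unplug) rather than a
 * short (sink IRQ) pulse.
 */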

static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/*
	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
	 * unless we touch the hotplug register, even if hotplug_trigger is
	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
	 * errors.
	 */
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	if (!hotplug_trigger) {
		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
			PORTD_HOTPLUG_STATUS_MASK |
			PORTC_HOTPLUG_STATUS_MASK |
			PORTB_HOTPLUG_STATUS_MASK;
		dig_hotplug_reg &= ~mask;
	}

	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
	if (!hotplug_trigger)
		return;

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   pch_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
}

static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev_priv))
				ivb_pipe_crc_irq_handler(dev_priv, pipe);
			else
				hsw_pipe_crc_irq_handler(dev_priv, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_C);

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev_priv);
}

static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
	u32 pin_mask = 0, long_mask = 0;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug_long_detect);
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug2_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);
}

static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   ilk_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev_priv);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev_priv))
			cpt_irq_handler(dev_priv, pch_iir);
		else
			ibx_irq_handler(dev_priv, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev_priv);
}

static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev_priv);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev_priv);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
			drm_handle_vblank(&dev_priv->drm, pipe);
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev_priv, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev_priv)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
		if (INTEL_GEN(dev_priv) >= 6)
			snb_gt_irq_handler(dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev_priv, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
		if (INTEL_GEN(dev_priv) >= 7)
			ivb_display_irq_handler(dev_priv, de_iir);
		else
			ilk_display_irq_handler(dev_priv, de_iir);
	}

	if (INTEL_GEN(dev_priv) >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, pm_iir);
		}
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev_priv)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   bxt_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
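
/*
 * GEN8+ display interrupts are demuxed from master_ctl below: MISC and
 * PORT have their own IIRs, each pipe has one, and PCH events are
 * funnelled through SDEIIR. Each IIR is cleared before its bits are
 * acted upon, following the ordering rules spelled out above
 * ironlake_irq_handler().
 */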

static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;
	u32 iir;
	enum pipe pipe;

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = I915_READ(GEN8_DE_MISC_IIR);
		if (iir) {
			I915_WRITE(GEN8_DE_MISC_IIR, iir);
			ret = IRQ_HANDLED;
			if (iir & GEN8_DE_MISC_GSE)
				intel_opregion_asle_intr(dev_priv);
			else
				DRM_ERROR("Unexpected DE Misc interrupt\n");
		}
		else
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = I915_READ(GEN8_DE_PORT_IIR);
		if (iir) {
			u32 tmp_mask;
			bool found = false;

			I915_WRITE(GEN8_DE_PORT_IIR, iir);
			ret = IRQ_HANDLED;

			tmp_mask = GEN8_AUX_CHANNEL_A;
			if (INTEL_GEN(dev_priv) >= 9)
				tmp_mask |= GEN9_AUX_CHANNEL_B |
					    GEN9_AUX_CHANNEL_C |
					    GEN9_AUX_CHANNEL_D;

			if (iir & tmp_mask) {
				dp_aux_irq_handler(dev_priv);
				found = true;
			}

			if (IS_GEN9_LP(dev_priv)) {
				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
				if (tmp_mask) {
					bxt_hpd_irq_handler(dev_priv, tmp_mask,
							    hpd_bxt);
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
				if (tmp_mask) {
					ilk_hpd_irq_handler(dev_priv,
							    tmp_mask, hpd_bdw);
					found = true;
				}
			}

			if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
				gmbus_irq_handler(dev_priv);
				found = true;
			}

			if (!found)
				DRM_ERROR("Unexpected DE Port interrupt\n");
		}
		else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
	}

	for_each_pipe(dev_priv, pipe) {
		u32 fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
			continue;
		}

		ret = IRQ_HANDLED;
		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev_priv, pipe);

		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		fault_errors = iir;
		if (INTEL_GEN(dev_priv) >= 9)
			fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		else
			fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

		if (fault_errors)
			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
				  pipe_name(pipe),
				  fault_errors);
	}

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		iir = I915_READ(SDEIIR);
		if (iir) {
			I915_WRITE(SDEIIR, iir);
			ret = IRQ_HANDLED;

			if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
			    HAS_PCH_CNP(dev_priv))
				spt_irq_handler(dev_priv, iir);
			else
				cpt_irq_handler(dev_priv, iir);
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
		}
	}

	return ret;
}

static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 master_ctl;
	u32 gt_iir[4] = {};
	irqreturn_t ret;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
	if (!master_ctl)
		return IRQ_NONE;

	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	/* Find, clear, then process each source of interrupt */
	ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
	gen8_gt_irq_handler(dev_priv, gt_iir);
	ret |= gen8_de_irq_handler(dev_priv, master_ctl);

	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ_FW(GEN8_MASTER_IRQ);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

struct wedge_me {
	struct delayed_work work;
	struct drm_i915_private *i915;
	const char *name;
};

static void wedge_me(struct work_struct *work)
{
	struct wedge_me *w = container_of(work, typeof(*w), work.work);

	dev_err(w->i915->drm.dev,
		"%s timed out, cancelling all in-flight rendering.\n",
		w->name);
	i915_gem_set_wedged(w->i915);
}

static void __init_wedge(struct wedge_me *w,
			 struct drm_i915_private *i915,
			 long timeout,
			 const char *name)
{
	w->i915 = i915;
	w->name = name;

	INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
	schedule_delayed_work(&w->work, timeout);
}

static void __fini_wedge(struct wedge_me *w)
{
	cancel_delayed_work_sync(&w->work);
	destroy_delayed_work_on_stack(&w->work);
	w->i915 = NULL;
}

#define i915_wedge_on_timeout(W, DEV, TIMEOUT)				\
	for (__init_wedge((W), (DEV), (TIMEOUT), __func__);		\
	     (W)->i915;							\
	     __fini_wedge((W)))
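
/*
 * Usage sketch, mirroring i915_reset_device() below: run a reset
 * sequence under a watchdog so that a stuck reset wedges the GPU
 * instead of hanging forever.
 *
 *	struct wedge_me w;
 *
 *	i915_wedge_on_timeout(&w, dev_priv, 5 * HZ) {
 *		... reset work; wedge_me() fires and wedges the GPU
 *		    if this block has not completed in five seconds ...
 *	}
 */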

/**
 * i915_reset_device - do process context error handling work
 * @dev_priv: i915 device private
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_reset_device(struct drm_i915_private *dev_priv)
{
	struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	struct wedge_me w;

	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);

	DRM_DEBUG_DRIVER("resetting chip\n");
	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);

	/* Use a watchdog to ensure that our reset completes */
	i915_wedge_on_timeout(&w, dev_priv, 5*HZ) {
		intel_prepare_reset(dev_priv);

		/* Signal that locked waiters should reset the GPU */
		set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags);
		wake_up_all(&dev_priv->gpu_error.wait_queue);

		/* Wait for anyone holding the lock to wakeup, without
		 * blocking indefinitely on struct_mutex.
		 */
		do {
			if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
				i915_reset(dev_priv, 0);
				mutex_unlock(&dev_priv->drm.struct_mutex);
			}
		} while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
					     I915_RESET_HANDOFF,
					     TASK_UNINTERRUPTIBLE,
					     1));

		intel_finish_reset(dev_priv);
	}

	if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		kobject_uevent_env(kobj,
				   KOBJ_CHANGE, reset_done_event);
}

static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
{
	u32 eir;

	if (!IS_GEN2(dev_priv))
		I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));

	if (INTEL_GEN(dev_priv) < 4)
		I915_WRITE(IPEIR, I915_READ(IPEIR));
	else
		I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));

	I915_WRITE(EIR, I915_READ(EIR));
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle a gpu error
 * @dev_priv: i915 device private
 * @engine_mask: mask representing engines that are hung
 * @fmt: Error message format string
 *
 * Do some basic checking of register state at error time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).  Tries per-engine resets first when available,
 * and escalates to a full device reset only for engines that could not
 * be recovered.
 */
void i915_handle_error(struct drm_i915_private *dev_priv,
		       u32 engine_mask,
		       const char *fmt, ...)
{
	struct intel_engine_cs *engine;
	unsigned int tmp;
	va_list args;
	char error_msg[80];

	va_start(args, fmt);
	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
	va_end(args);

	/*
	 * In most cases it's guaranteed that we get here with an RPM
	 * reference held, for example because there is a pending GPU
	 * request that won't finish until the reset is done. This
	 * isn't the case at least when we get here by doing a
	 * simulated reset via debugfs, so get an RPM reference.
	 */
	intel_runtime_pm_get(dev_priv);

	i915_capture_error_state(dev_priv, engine_mask, error_msg);
	i915_clear_error_registers(dev_priv);

	/*
	 * Try engine reset when available. We fall back to full reset if
	 * single reset fails.
	 */
	if (intel_has_reset_engine(dev_priv)) {
		for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
			BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
			if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
					     &dev_priv->gpu_error.flags))
				continue;

			if (i915_reset_engine(engine, 0) == 0)
				engine_mask &= ~intel_engine_flag(engine);

			clear_bit(I915_RESET_ENGINE + engine->id,
				  &dev_priv->gpu_error.flags);
			wake_up_bit(&dev_priv->gpu_error.flags,
				    I915_RESET_ENGINE + engine->id);
		}
	}

	if (!engine_mask)
		goto out;

	/* Full reset needs the mutex, stop any other user trying to do so. */
	if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) {
		wait_event(dev_priv->gpu_error.reset_queue,
			   !test_bit(I915_RESET_BACKOFF,
				     &dev_priv->gpu_error.flags));
		goto out;
	}

	/* Prevent any other reset-engine attempt. */
	for_each_engine(engine, dev_priv, tmp) {
		while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
					&dev_priv->gpu_error.flags))
			wait_on_bit(&dev_priv->gpu_error.flags,
				    I915_RESET_ENGINE + engine->id,
				    TASK_UNINTERRUPTIBLE);
	}

	i915_reset_device(dev_priv);

	for_each_engine(engine, dev_priv, tmp) {
		clear_bit(I915_RESET_ENGINE + engine->id,
			  &dev_priv->gpu_error.flags);
	}

	clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags);
	wake_up_all(&dev_priv->gpu_error.reset_queue);

out:
	intel_runtime_pm_put(dev_priv);
}
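
/*
 * Illustrative call from a hangcheck-style path (values hypothetical):
 *
 *	i915_handle_error(dev_priv, intel_engine_flag(engine),
 *			  "%s hung", engine->name);
 */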

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;
	uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;
	uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_NOP(dev_priv))
		return;

	GEN3_IRQ_RESET(SDE);

	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		I915_WRITE(SERR_INT, 0xffffffff);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * This function needs to be called before interrupts are enabled.
 */
static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (HAS_PCH_NOP(dev_priv))
		return;

	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}

static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	GEN3_IRQ_RESET(GT);
	if (INTEL_GEN(dev_priv) >= 6)
		GEN3_IRQ_RESET(GEN6_PM);
}

static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	if (IS_CHERRYVIEW(dev_priv))
		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);

	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(VLV_);
	dev_priv->irq_mask = ~0;
}

static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 enable_mask;
	enum pipe pipe;

	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_LPE_PIPE_A_INTERRUPT |
		I915_LPE_PIPE_B_INTERRUPT;

	if (IS_CHERRYVIEW(dev_priv))
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
			I915_LPE_PIPE_C_INTERRUPT;

	WARN_ON(dev_priv->irq_mask != ~0);

	dev_priv->irq_mask = ~enable_mask;

	GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
}

/* drm_dma.h hooks
*/
static void ironlake_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (IS_GEN5(dev_priv))
		I915_WRITE(HWSTAM, 0xffffffff);

	GEN3_IRQ_RESET(DE);
	if (IS_GEN7(dev_priv))
		I915_WRITE(GEN7_ERR_INT, 0xffffffff);

	gen5_gt_irq_reset(dev_priv);

	ibx_irq_reset(dev_priv);
}

static void valleyview_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(VLV_MASTER_IER, 0);
	POSTING_READ(VLV_MASTER_IER);

	gen5_gt_irq_reset(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	GEN8_IRQ_RESET_NDX(GT, 0);
	GEN8_IRQ_RESET_NDX(GT, 1);
	GEN8_IRQ_RESET_NDX(GT, 2);
	GEN8_IRQ_RESET_NDX(GT, 3);
}

static void gen8_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);

	GEN3_IRQ_RESET(GEN8_DE_PORT_);
	GEN3_IRQ_RESET(GEN8_DE_MISC_);
	GEN3_IRQ_RESET(GEN8_PCU_);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_reset(dev_priv);
}

void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);
	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
				  dev_priv->de_irq_mask[pipe],
				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);
	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);
}

static void cherryview_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN3_IRQ_RESET(GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}

static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on LPT+.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTB_PULSE_DURATION_MASK |
		     PORTC_PULSE_DURATION_MASK |
		     PORTD_PULSE_DURATION_MASK);
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	/*
	 * When CPU and PCH are on the same package, port A
	 * HPD must be enabled in both north and south.
	 */
	if (HAS_PCH_LPT_LP(dev_priv))
		hotplug |= PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	if (HAS_PCH_IBX(dev_priv)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	ibx_hpd_detection_setup(dev_priv);
}

static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 val, hotplug;

	/* Display WA #1179 WaHardHangonHotPlug: cnp */
	if (HAS_PCH_CNP(dev_priv)) {
		val = I915_READ(SOUTH_CHICKEN1);
		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
		val |= CHASSIS_CLK_REQ_DURATION(0xf);
		I915_WRITE(SOUTH_CHICKEN1, val);
	}

	/* Enable digital hotplug on the PCH */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE |
		   PORTD_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);

	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
	hotplug |= PORTE_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}

static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	spt_hpd_detection_setup(dev_priv);
}

static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 * The pulse duration bits are reserved on HSW+.
	 */
	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
		   DIGITAL_PORTA_PULSE_DURATION_2ms;
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
}

static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	if (INTEL_GEN(dev_priv) >= 8) {
		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);

		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else if (INTEL_GEN(dev_priv) >= 7) {
		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else {
		hotplug_irqs = DE_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	}

	ilk_hpd_detection_setup(dev_priv);

	ibx_hpd_irq_setup(dev_priv);
}

static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
				      u32 enabled_irqs)
{
	u32 hotplug;

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE;

	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
		      hotplug, enabled_irqs);
	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;

	/*
	 * For BXT invert bit has to be set based on AOB design
	 * for HPD detection logic, update it based on VBT fields.
	 */
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
		hotplug |= BXT_DDIA_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
		hotplug |= BXT_DDIB_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
		hotplug |= BXT_DDIC_HPD_INVERT;

	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	__bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
}

static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;

	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);

	__bxt_hpd_detection_setup(dev_priv, enabled_irqs);
}

static void ibx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 mask;

	if (HAS_PCH_NOP(dev_priv))
		return;

	if (HAS_PCH_IBX(dev_priv))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
	else
		mask = SDE_GMBUS_CPT;

	gen3_assert_iir_is_zero(dev_priv, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);

	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
	    HAS_PCH_LPT(dev_priv))
		ibx_hpd_detection_setup(dev_priv);
	else
		spt_hpd_detection_setup(dev_priv);
}

static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev_priv)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
		gt_irqs |= GT_PARITY_ERROR(dev_priv);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev_priv)) {
		gt_irqs |= ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);

	if (INTEL_GEN(dev_priv) >= 6) {
		/*
		 * RPS interrupts will get enabled/disabled on demand when RPS
		 * itself is enabled/disabled.
		 */
		if (HAS_VEBOX(dev_priv)) {
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
			dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
		}

		dev_priv->pm_imr = 0xffffffff;
		GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
	}
}

3395
static int ironlake_irq_postinstall(struct drm_device *dev)
3396
{
3397
	struct drm_i915_private *dev_priv = to_i915(dev);
3398 3399
	u32 display_mask, extra_mask;

3400
	if (INTEL_GEN(dev_priv) >= 7) {
3401
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3402
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
3403
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3404 3405
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_DP_A_HOTPLUG_IVB);
3406 3407
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3408 3409
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
3410 3411 3412
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_DP_A_HOTPLUG);
3413
	}
3414

3415
	dev_priv->irq_mask = ~display_mask;
3416

P
Paulo Zanoni 已提交
3417 3418
	ibx_irq_pre_postinstall(dev);

V
Ville Syrjälä 已提交
3419
	GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);

	gen5_gt_irq_postinstall(dev);

	ilk_hpd_detection_setup(dev_priv);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev_priv)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irq(&dev_priv->irq_lock);
		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}

	return 0;
}

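/*
 * Presumably called from the display power well code when the VLV/CHV
 * display power well comes up: the actual register writes are deferred
 * to the postinstall hook until the irq handler has been installed.
 */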
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv)) {
		vlv_display_irq_reset(dev_priv);
		vlv_display_irq_postinstall(dev_priv);
	}
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		vlv_display_irq_reset(dev_priv);
}


static int valleyview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen5_gt_irq_postinstall(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(VLV_MASTER_IER);

	return 0;
}

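/*
 * GEN8+ GT interrupts are spread over four register banks: 0 = RCS/BCS,
 * 1 = VCS1/VCS2, 2 = PM/RPS (and GuC), 3 = VECS. Each engine owns a
 * 16-bit half of its bank, hence the GEN8_*_IRQ_SHIFT offsets below.
 */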
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
		};

	if (HAS_L3_DPF(dev_priv))
		gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	dev_priv->pm_ier = 0x0;
	dev_priv->pm_imr = ~dev_priv->pm_ier;
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled. The same will be the case for GuC interrupts.
	 */
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

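/*
 * GEN8+ display engine interrupts: per-pipe vblank/underrun/CRC/fault
 * bits, port bits (AUX channels, hotplug, GMBUS on BXT) and the misc GSE
 * bit. Pipes whose power wells are currently off are skipped here; their
 * interrupts get set up when the power well comes on.
 */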
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	uint32_t de_pipe_enables;
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_MISC_GSE;
	enum pipe pipe;

	if (INTEL_GEN(dev_priv) >= 9) {
		de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
				  GEN9_AUX_CHANNEL_D;
		if (IS_GEN9_LP(dev_priv))
			de_port_masked |= BXT_DE_PORT_GMBUS;
	} else {
		de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	}

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

	de_port_enables = de_port_masked;
	if (IS_GEN9_LP(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;

	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);

	GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);

	if (IS_GEN9_LP(dev_priv))
		bxt_hpd_detection_setup(dev_priv);
	else if (IS_BROADWELL(dev_priv))
		ilk_hpd_detection_setup(dev_priv);
}

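/* The top-level master enable is written only after all sub-blocks are set up. */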
static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

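/*
 * CHV pairs the GEN8 GT setup with VLV-style display irq handling,
 * including the display_irqs_enabled gating by the display power well.
 */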
static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen8_gt_irq_postinstall(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

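/* GEN2: 16-bit interrupt registers, two pipes, no hotplug block. */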
static void i8xx_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	i9xx_pipestat_irq_reset(dev_priv);

	I915_WRITE16(HWSTAM, 0xffff);

	GEN2_IRQ_RESET();
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u16 enable_mask;

	I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE |
			    I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);

	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_USER_INTERRUPT;

	GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

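/*
 * Note the do { } while (0) below: the body runs at most once, with break
 * as a common early exit; the shape simply mirrors the other IIR loops.
 */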
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u16 iir;

		iir = I915_READ16(IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		I915_WRITE16(IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);

		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

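/* GEN3: 32-bit interrupt registers; hotplug exists only on some variants. */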
static void i915_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	i9xx_pipestat_irq_reset(dev_priv);

	I915_WRITE(HWSTAM, 0xffffffff);

	GEN3_IRQ_RESET();
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
			  I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);

	return 0;
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		I915_WRITE(IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);

		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

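/* GEN4 (i965/g4x): like GEN3, but the hotplug block is always present. */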
static void i965_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	I915_WRITE(HWSTAM, 0xffffffff);

	GEN3_IRQ_RESET();
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;
	u32 error_mask;

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);

	return 0;
}

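/*
 * Program the GEN3/4 hotplug enables; the caller must already hold
 * irq_lock, as asserted below.
 */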
static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/*
	 * Programming the CRT detection parameters tends to generate a
	 * spurious hotplug event about three seconds later. So just do it
	 * once.
	 */
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		I915_WRITE(IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);

		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev_priv->engine[VCS]);

		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not setup the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int i;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);

	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	if (HAS_GUC_SCHED(dev_priv))
		dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	dev_priv->rps.pm_intrmsk_mbz = 0;

	/*
	 * SNB, IVB and HSW can hard hang, while VLV and CHV may hard hang,
	 * on a looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_GEN(dev_priv) <= 7)
		dev_priv->rps.pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_GEN(dev_priv) >= 8)
		dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;

	if (IS_GEN2(dev_priv)) {
		/* Gen2 doesn't have a hardware frame counter */
		dev->max_vblank_count = 0;
	} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	/* Most platforms treat the display irq block as an always-on
	 * power domain. vlv/chv can disable it at runtime and need
	 * special care to avoid writing any of the display block registers
	 * outside of the power domain. We defer setting up the display irqs
	 * in this case to the runtime pm.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;

	dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_reset;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_reset;
		dev->driver->enable_vblank = i965_enable_vblank;
		dev->driver->disable_vblank = i965_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_reset;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_reset;
		dev->driver->enable_vblank = i965_enable_vblank;
		dev->driver->disable_vblank = i965_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_GEN(dev_priv) >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_reset;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (IS_GEN9_LP(dev_priv))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
			 HAS_PCH_CNP(dev_priv))
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_reset;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else {
		if (IS_GEN2(dev_priv)) {
			dev->driver->irq_preinstall = i8xx_irq_reset;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_reset;
			dev->driver->enable_vblank = i8xx_enable_vblank;
			dev->driver->disable_vblank = i8xx_disable_vblank;
		} else if (IS_GEN3(dev_priv)) {
			dev->driver->irq_preinstall = i915_irq_reset;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_reset;
			dev->driver->irq_handler = i915_irq_handler;
			dev->driver->enable_vblank = i8xx_enable_vblank;
			dev->driver->disable_vblank = i8xx_disable_vblank;
		} else {
			dev->driver->irq_preinstall = i965_irq_reset;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_reset;
			dev->driver->irq_handler = i965_irq_handler;
			dev->driver->enable_vblank = i965_enable_vblank;
			dev->driver->disable_vblank = i965_disable_vblank;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	}
}

/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
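 *
 * A rough sketch of the intended ordering at driver load (error handling
 * omitted, names as used by this file; intel_hpd_init() lives in
 * intel_hotplug.c):
 *
 *	intel_irq_init(dev_priv);
 *	ret = intel_irq_install(dev_priv);
 *	...probe and register outputs...
 *	intel_hpd_init(dev_priv);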
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(&dev_priv->drm);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
	dev_priv->pm.irqs_enabled = false;
	synchronize_irq(dev_priv->drm.irq);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
	dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
}