/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)
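/*
 * For illustration only (not part of the driver): GEN8_IRQ_RESET_NDX(GT, 0)
 * expands to roughly
 *
 *	I915_WRITE(GEN8_GT_IMR(0), 0xffffffff);
 *	POSTING_READ(GEN8_GT_IMR(0));
 *	I915_WRITE(GEN8_GT_IER(0), 0);
 *	I915_WRITE(GEN8_GT_IIR(0), 0xffffffff);
 *	POSTING_READ(GEN8_GT_IIR(0));
 *	I915_WRITE(GEN8_GT_IIR(0), 0xffffffff);
 *	POSTING_READ(GEN8_GT_IIR(0));
 *
 * i.e. mask everything, stop generating interrupts, then ack IIR twice in
 * case a second event was already queued behind the first one.
 */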

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)
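/*
 * Note on the ordering in the INIT macros above: IIR is first asserted to
 * be zero (a stale bit here means an interrupt was left pending across a
 * reset), then IER selects which interrupts are generated at all, and
 * finally IMR unmasks them; the POSTING_READ flushes the writes to the
 * hardware.
 */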

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
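/*
 * Illustrative caller sketch (not quoted verbatim from this file): the
 * vblank hooks further down use this pair roughly as
 *
 *	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 *	ironlake_enable_display_irq(dev_priv, DE_PIPEA_VBLANK);
 *	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 *
 * i.e. irq_lock must be held by the caller, which is what the
 * assert_spin_locked() checks above enforce.
 */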

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}
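/*
 * Worked example of the (interrupt_mask, enabled_irq_mask) convention used
 * by ilk_update_gt_irq() and the similar helpers below: calling with
 * (GT_RENDER_USER_INTERRUPT, GT_RENDER_USER_INTERRUPT) clears the bit in
 * GTIMR (unmasks, i.e. enables the interrupt), while
 * (GT_RENDER_USER_INTERRUPT, 0) sets the bit again (masks it). Bits outside
 * interrupt_mask are left untouched.
 */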

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON(dev_priv->rps.pm_iir);
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->rps.work);

	spin_lock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ?
		   ~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0);

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
				~dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);

	dev_priv->rps.pm_iir = 0;

	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
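/*
 * PIPESTAT packs the interrupt enable bits into the upper 16 bits and the
 * corresponding status bits into the lower 16 bits of the same register,
 * which is why the callers below normally derive
 * enable_mask = status_mask << 16 (VLV being the exception handled by
 * vlv_get_pipestat_enable_mask()).
 */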

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		        u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				"pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		htotal = mode->crtc_htotal;
		hsync_start = mode->crtc_hsync_start;
		vbl_start = mode->crtc_vblank_start;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		hsync_start = (I915_READ(HSYNC(cpu_transcoder))  & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
		if ((I915_READ(PIPECONF(cpu_transcoder)) &
		     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	}

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
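/*
 * Worked example with made-up numbers: if vbl_start converts to a pixel
 * count of 1000000, the hardware frame counter reads 0x000102 and the pixel
 * counter reads 1000123, the function returns 0x000102 + 1, because the
 * pixel counter shows we are already past the start of vblank while the
 * gen3/4 frame counter only increments at the start of active video (see
 * the timing diagram above).
 */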

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;
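	/*
	 * Worked example with illustrative numbers: with vbl_start = 768
	 * and vbl_end = vtotal = 806, a raw scanline position of 800
	 * (inside vblank) becomes 800 - 806 = -6, i.e. six lines before
	 * vbl_end, while a raw position of 100 (in the active area) stays
	 * at 100 since vtotal - vbl_end == 0 for such a mode.
	 */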

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
			      int *max_error,
			      struct timeval *vblank_time,
			      unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config.adjusted_mode);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      connector->name,
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
848 849
}

850 851 852 853 854 855 856 857 858
static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, dig_port_work);
	u32 long_port_mask, short_port_mask;
	struct intel_digital_port *intel_dig_port;
	int i, ret;
	u32 old_bits = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	long_port_mask = dev_priv->long_hpd_port_mask;
	dev_priv->long_hpd_port_mask = 0;
	short_port_mask = dev_priv->short_hpd_port_mask;
	dev_priv->short_hpd_port_mask = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	for (i = 0; i < I915_MAX_PORTS; i++) {
		bool valid = false;
		bool long_hpd = false;
		intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
			continue;

		if (long_port_mask & (1 << i))  {
			valid = true;
			long_hpd = true;
		} else if (short_port_mask & (1 << i))
			valid = true;

		if (valid) {
			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
			if (ret == true) {
				/* if we get true fallback to old school hpd */
				old_bits |= (1 << intel_dig_port->base.hpd_pin);
			}
		}
	}

	if (old_bits) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->hpd_event_bits |= old_bits;
		spin_unlock_irq(&dev_priv->irq_lock);
		schedule_work(&dev_priv->hotplug_work);
	}
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irq(&dev_priv->irq_lock);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				connector->name);
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      connector->name, intel_encoder->hpd_pin);
		}
	}
	 /* if there were no outputs to poll, poll was disabled,
	  * therefore make sure it's enabled when disabling HPD on
	  * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
				 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irq(&dev_priv->irq_lock);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_complete(ring);

	wake_up_all(&ring->irq_queue);
}

static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
			    struct intel_rps_ei *rps_ei)
{
	u32 cz_ts, cz_freq_khz;
	u32 render_count, media_count;
	u32 elapsed_render, elapsed_media, elapsed_time;
	u32 residency = 0;

	cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);

	render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
	media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);

	if (rps_ei->cz_clock == 0) {
		rps_ei->cz_clock = cz_ts;
		rps_ei->render_c0 = render_count;
		rps_ei->media_c0 = media_count;

		return dev_priv->rps.cur_freq;
	}

	elapsed_time = cz_ts - rps_ei->cz_clock;
	rps_ei->cz_clock = cz_ts;

	elapsed_render = render_count - rps_ei->render_c0;
	rps_ei->render_c0 = render_count;

	elapsed_media = media_count - rps_ei->media_c0;
	rps_ei->media_c0 = media_count;

	/* Convert all the counters into common unit of milli sec */
	elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
	elapsed_render /=  cz_freq_khz;
	elapsed_media /= cz_freq_khz;

	/*
	 * Calculate overall C0 residency percentage
	 * only if elapsed time is non zero
	 */
	if (elapsed_time) {
		residency =
			((max(elapsed_render, elapsed_media) * 100)
				/ elapsed_time);
	}

	return residency;
}

/**
 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
 * busy-ness calculated from C0 counters of render & media power wells
 * @dev_priv: DRM device private
 *
 */
static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
{
	u32 residency_C0_up = 0, residency_C0_down = 0;
	int new_delay, adj;

	dev_priv->rps.ei_interrupt_count++;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));


	if (dev_priv->rps.up_ei.cz_clock == 0) {
		vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
		vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
		return dev_priv->rps.cur_freq;
	}


	/*
	 * To down throttle, C0 residency should be less than down threshold
	 * for continuous EI intervals. So calculate down EI counters
	 * once in VLV_INT_COUNT_FOR_DOWN_EI
	 */
	if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {

		dev_priv->rps.ei_interrupt_count = 0;

		residency_C0_down = vlv_c0_residency(dev_priv,
						     &dev_priv->rps.down_ei);
	} else {
		residency_C0_up = vlv_c0_residency(dev_priv,
						   &dev_priv->rps.up_ei);
	}

	new_delay = dev_priv->rps.cur_freq;

	adj = dev_priv->rps.last_adj;
	/* C0 residency is greater than UP threshold. Increase Frequency */
	if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;

		if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;

	} else if (!dev_priv->rps.ei_interrupt_count &&
			(residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		/*
		 * This means, C0 residency is less than down threshold over
		 * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq
		 */
		if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;
	}

	return new_delay;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	/* Speed up work cancelation during disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_freq;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_freq_softlimit,
			    dev_priv->rps.max_freq_softlimit);

	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely that the same row will go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;
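		/*
		 * Illustrative uevent payload (values made up), assuming
		 * I915_L3_PARITY_UEVENT expands to "L3_PARITY_ERROR":
		 * L3_PARITY_ERROR=1 ROW=13 BANK=2 SUBBANK=0 SLICE=1
		 */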

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	struct intel_engine_cs *ring;
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(0), tmp);
			ret = IRQ_HANDLED;

			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			ring = &dev_priv->ring[RCS];
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);

			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			ring = &dev_priv->ring[BCS];
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(1), tmp);
			ret = IRQ_HANDLED;

			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);

			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS2];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			I915_WRITE(GEN8_GT_IIR(2),
				   tmp & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, tmp);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(3), tmp);
			ret = IRQ_HANDLED;

			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			ring = &dev_priv->ring[VECS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	return ret;
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5
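/*
 * In other words: more than HPD_STORM_THRESHOLD (5) hotplug interrupts on
 * the same pin within HPD_STORM_DETECT_PERIOD (1000 ms) is treated as an
 * interrupt storm, and that pin is switched over to polling; see the storm
 * accounting in intel_hpd_irq_handler() below.
 */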

static int pch_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 0;
	case PORT_C:
		return 8;
	case PORT_D:
		return 16;
	}
}

static int i915_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 17;
	case PORT_C:
		return 19;
	case PORT_D:
		return 21;
	}
}

static inline enum port get_port_from_pin(enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_B:
		return PORT_B;
	case HPD_PORT_C:
		return PORT_C;
	case HPD_PORT_D:
		return PORT_D;
	default:
		return PORT_A; /* no hpd */
	}
}

static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 u32 dig_hotplug_reg,
					 const u32 *hpd)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	enum port port;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 dig_shift;
	u32 dig_port_mask = 0;

	if (!hotplug_trigger)
		return;

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg);

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (!(hpd[i] & hotplug_trigger))
			continue;

		port = get_port_from_pin(i);
		if (port && dev_priv->hpd_irq_port[port]) {
			bool long_hpd;

			if (HAS_PCH_SPLIT(dev)) {
				dig_shift = pch_port_to_hotplug_shift(port);
				long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			} else {
				dig_shift = i915_port_to_hotplug_shift(port);
				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			}

			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
					 port_name(port),
					 long_hpd ? "long" : "short");
			/* for long HPD pulses we want to have the digital queue happen,
			   but we still want HPD storm detection to function. */
			if (long_hpd) {
				dev_priv->long_hpd_port_mask |= (1 << port);
				dig_port_mask |= hpd[i];
			} else {
				/* for short HPD just trigger the digital queue */
				dev_priv->short_hpd_port_mask |= (1 << port);
				hotplug_trigger &= ~hpd[i];
			}
			queue_dig = true;
		}
	}

	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (hpd[i] & hotplug_trigger &&
		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
				  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
				  hotplug_trigger, i, hpd[i]);

			continue;
		}

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		if (!(dig_port_mask & hpd[i])) {
			dev_priv->hpd_event_bits |= (1 << i);
			queue_hp = true;
		}

		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
	if (queue_hp)
		schedule_work(&dev_priv->hotplug_work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_DEBUG_KMS("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif


static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
D
Daniel Vetter 已提交
1682 1683
}

1684
static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1685 1686 1687
{
	struct drm_i915_private *dev_priv = dev->dev_private;

1688 1689 1690 1691 1692 1693
	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1694
}
1695

1696
static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1697 1698
{
	struct drm_i915_private *dev_priv = dev->dev_private;
1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;
1710

1711 1712 1713 1714 1715
	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
1716
}
1717

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	/* TODO: RPS on GEN9+ is not supported yet. */
	if (WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
		      "GEN9+: unexpected RPS IRQ\n"))
		return;

	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			queue_work(dev_priv->wq, &dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

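/* Returns true if the DRM core accepted the vblank event for this pipe. */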
static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
	if (!drm_handle_vblank(dev, pipe))
		return false;

	return true;
}

static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	spin_lock(&dev_priv->irq_lock);
	for_each_pipe(dev_priv, pipe) {
		int reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}

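/*
 * Gen4/VLV hotplug: latch PORT_HOTPLUG_STAT, clear it before the IIR is
 * cleared, then dispatch through the platform specific HPD status table.
 */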
static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (hotplug_status) {
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
		/*
		 * Make sure hotplug status is cleared before we clear IIR, or else we
		 * may miss hotplug events.
		 */
		POSTING_READ(PORT_HOTPLUG_STAT);

		if (IS_G4X(dev)) {
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
		} else {
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
		}

		if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
		    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev);
	}
}

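/*
 * Top level VLV interrupt handler: loop until the GT, PM and display IIRs
 * are all clear, acking each source before it is processed.
 */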
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;

	while (true) {
		/* Find, clear, then process each source of interrupt */

		gt_iir = I915_READ(GTIIR);
		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);

		pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		iir = I915_READ(VLV_IIR);
		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		if (gt_iir)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);
		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);
	}

out:
	return ret;
}

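/*
 * CHV wraps the VLV style display handling in the gen8 master interrupt
 * control: the master enable is cleared while the sources are processed
 * and restored before the next loop iteration.
 */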
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl, iir;
	irqreturn_t ret = IRQ_NONE;

	for (;;) {
		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		I915_WRITE(GEN8_MASTER_IRQ, 0);

		/* Find, clear, then process each source of interrupt */

		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		gen8_gt_irq_handler(dev, dev_priv, master_ctl);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);

		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);
	}

	return ret;
}

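/* South (PCH) display interrupt handler, IBX flavour. */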
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
	u32 dig_hotplug_reg;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
}

static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev))
				ivb_pipe_crc_irq_handler(dev, pipe);
			else
				hsw_pipe_crc_irq_handler(dev, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);

	I915_WRITE(SERR_INT, serr_int);
}

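/* South (PCH) display interrupt handler, CPT/PPT flavour. */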
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
	u32 dig_hotplug_reg;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}

2082 2083 2084
static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
2085
	enum pipe pipe;
2086 2087 2088 2089 2090 2091 2092 2093 2094 2095

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

2096
	for_each_pipe(dev_priv, pipe) {
2097 2098 2099
		if (de_iir & DE_PIPE_VBLANK(pipe) &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);
2100

2101
		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2102
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2103

2104 2105
		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev, pipe);
2106

2107 2108 2109 2110 2111
		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev);
}

2131 2132 2133
static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
2134
	enum pipe pipe;
2135 2136 2137 2138 2139 2140 2141 2142 2143 2144

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

2145
	for_each_pipe(dev_priv, pipe) {
2146 2147 2148
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);
2149 2150

		/* plane/pipes map 1:1 on ilk+ */
2151 2152 2153
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167
		}
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

2168 2169 2170 2171 2172 2173 2174 2175
/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
2176
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2177
{
2178
	struct drm_device *dev = arg;
2179
	struct drm_i915_private *dev_priv = dev->dev_private;
2180
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2181
	irqreturn_t ret = IRQ_NONE;
2182

2183 2184
	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
2185
	intel_uncore_check_errors(dev);
2186

2187 2188 2189
	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2190
	POSTING_READ(DEIER);
2191

2192 2193 2194 2195 2196
	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
2197 2198 2199 2200 2201
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}
2202

2203 2204
	/* Find, clear, then process each source of interrupt */

2205
	gt_iir = I915_READ(GTIIR);
2206
	if (gt_iir) {
2207 2208
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
2209
		if (INTEL_INFO(dev)->gen >= 6)
2210
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
2211 2212
		else
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
2213 2214
	}

2215 2216
	de_iir = I915_READ(DEIIR);
	if (de_iir) {
2217 2218
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
2219 2220 2221 2222
		if (INTEL_INFO(dev)->gen >= 7)
			ivb_display_irq_handler(dev, de_iir);
		else
			ilk_display_irq_handler(dev, de_iir);
2223 2224
	}

2225 2226 2227 2228 2229
	if (INTEL_INFO(dev)->gen >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
2230
			gen6_rps_irq_handler(dev_priv, pm_iir);
2231
		}
2232
	}
2233 2234 2235

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
2236 2237 2238 2239
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}
2240 2241 2242 2243

	return ret;
}

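/*
 * Gen8+ top level handler: clear the master enable, ack and handle each
 * leaf IIR that master_ctl reports as pending, then restore the master
 * enable.
 */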
2244 2245 2246 2247 2248 2249 2250
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl;
	irqreturn_t ret = IRQ_NONE;
	uint32_t tmp = 0;
2251
	enum pipe pipe;
	u32 aux_mask = GEN8_AUX_CHANNEL_A;

	if (IS_GEN9(dev))
		aux_mask |=  GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;
2257 2258 2259 2260 2261 2262 2263 2264 2265

	master_ctl = I915_READ(GEN8_MASTER_IRQ);
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
	if (!master_ctl)
		return IRQ_NONE;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

2266 2267
	/* Find, clear, then process each source of interrupt */

2268 2269 2270 2271 2272 2273 2274
	ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		tmp = I915_READ(GEN8_DE_MISC_IIR);
		if (tmp) {
			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
			ret = IRQ_HANDLED;
2275 2276 2277 2278
			if (tmp & GEN8_DE_MISC_GSE)
				intel_opregion_asle_intr(dev);
			else
				DRM_ERROR("Unexpected DE Misc interrupt\n");
2279
		}
2280 2281
		else
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2282 2283
	}

2284 2285 2286 2287 2288
	if (master_ctl & GEN8_DE_PORT_IRQ) {
		tmp = I915_READ(GEN8_DE_PORT_IIR);
		if (tmp) {
			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
			ret = IRQ_HANDLED;

			if (tmp & aux_mask)
2291 2292 2293
				dp_aux_irq_handler(dev);
			else
				DRM_ERROR("Unexpected DE Port interrupt\n");
2294
		}
2295 2296
		else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2297 2298
	}

2299
	for_each_pipe(dev_priv, pipe) {
2300
		uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
2301

2302 2303
		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;
2304

2305 2306 2307 2308
		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (pipe_iir) {
			ret = IRQ_HANDLED;
			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2309

2310 2311 2312
			if (pipe_iir & GEN8_PIPE_VBLANK &&
			    intel_pipe_handle_vblank(dev, pipe))
				intel_check_page_flip(dev, pipe);
2313

2314 2315 2316 2317 2318 2319
			if (IS_GEN9(dev))
				flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
			else
				flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;

			if (flip_done) {
2320 2321 2322 2323 2324 2325 2326
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip_plane(dev, pipe);
			}

			if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
				hsw_pipe_crc_irq_handler(dev, pipe);

2327 2328 2329
			if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
2330

2331 2332 2333 2334 2335 2336 2337

			if (IS_GEN9(dev))
				fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
			else
				fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

			if (fault_errors)
2338 2339 2340
				DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
					  pipe_name(pipe),
					  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2341
		} else
2342 2343 2344
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
	}

2345 2346 2347 2348 2349 2350 2351 2352 2353 2354
	if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		u32 pch_iir = I915_READ(SDEIIR);
		if (pch_iir) {
			I915_WRITE(SDEIIR, pch_iir);
			ret = IRQ_HANDLED;
2355 2356 2357 2358
			cpt_irq_handler(dev, pch_iir);
		} else
			DRM_ERROR("The master control interrupt lied (SDE)!\n");

2359 2360
	}

2361 2362 2363 2364 2365 2366
	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return ret;
}

2367 2368 2369
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
			       bool reset_completed)
{
2370
	struct intel_engine_cs *ring;
2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394
	int i;

	/*
	 * Notify all waiters for GPU completion events that reset state has
	 * been changed, and that they need to restart their wait after
	 * checking for potential errors (and bail out to drop locks if there is
	 * a gpu reset pending so that i915_error_work_func can acquire them).
	 */

	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
	for_each_ring(ring, dev_priv, i)
		wake_up_all(&ring->irq_queue);

	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
	wake_up_all(&dev_priv->pending_flip_queue);

	/*
	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
	 * reset state is cleared.
	 */
	if (reset_completed)
		wake_up_all(&dev_priv->gpu_error.reset_queue);
}

2395 2396 2397 2398 2399 2400 2401 2402 2403
/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
2404 2405
	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
						    work);
2406 2407
	struct drm_i915_private *dev_priv =
		container_of(error, struct drm_i915_private, gpu_error);
2408
	struct drm_device *dev = dev_priv->dev;
2409 2410 2411
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2412
	int ret;
2413

2414
	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2415

2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426
	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2427
		DRM_DEBUG_DRIVER("resetting chip\n");
2428
		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2429
				   reset_event);
2430

2431 2432 2433 2434 2435 2436 2437 2438
		/*
		 * In most cases it's guaranteed that we get here with an RPM
		 * reference held, for example because there is a pending GPU
		 * request that won't finish until the reset is done. This
		 * isn't the case at least when we get here by doing a
		 * simulated reset via debugs, so get an RPM reference.
		 */
		intel_runtime_pm_get(dev_priv);
2439 2440 2441

		intel_prepare_reset(dev);

2442 2443 2444 2445 2446 2447
		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
2448 2449
		ret = i915_reset(dev);

2450
		intel_finish_reset(dev);
2451

2452 2453
		intel_runtime_pm_put(dev_priv);

2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464
		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before
			 * the counter increment.
			 */
2465
			smp_mb__before_atomic();
2466 2467
			atomic_inc(&dev_priv->gpu_error.reset_counter);

2468
			kobject_uevent_env(&dev->primary->kdev->kobj,
2469
					   KOBJ_CHANGE, reset_done_event);
2470
		} else {
			atomic_set_mask(I915_WEDGED, &error->reset_counter);
2472
		}
2473

2474 2475 2476 2477 2478
		/*
		 * Note: The wake_up also serves as a memory barrier so that
		 * waiters see the update value of the reset counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, true);
2479
	}
2480 2481
}

2482
static void i915_report_and_clear_eir(struct drm_device *dev)
2483 2484
{
	struct drm_i915_private *dev_priv = dev->dev_private;
2485
	uint32_t instdone[I915_NUM_INSTDONE_REG];
2486
	u32 eir = I915_READ(EIR);
2487
	int pipe, i;
2488

2489 2490
	if (!eir)
		return;
2491

2492
	pr_err("render error detected, EIR: 0x%08x\n", eir);
2493

2494 2495
	i915_get_extra_instdone(dev, instdone);

2496 2497 2498 2499
	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

2500 2501
			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2502 2503
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2504 2505
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2506
			I915_WRITE(IPEIR_I965, ipeir);
2507
			POSTING_READ(IPEIR_I965);
2508 2509 2510
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
2511 2512
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2513
			I915_WRITE(PGTBL_ER, pgtbl_err);
2514
			POSTING_READ(PGTBL_ER);
2515 2516 2517
		}
	}

2518
	if (!IS_GEN2(dev)) {
2519 2520
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
2521 2522
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2523
			I915_WRITE(PGTBL_ER, pgtbl_err);
2524
			POSTING_READ(PGTBL_ER);
2525 2526 2527 2528
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
2529
		pr_err("memory refresh error:\n");
2530
		for_each_pipe(dev_priv, pipe)
2531
			pr_err("pipe %c stat: 0x%08x\n",
2532
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2533 2534 2535
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
2536 2537
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
2538 2539
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2540
		if (INTEL_INFO(dev)->gen < 4) {
2541 2542
			u32 ipeir = I915_READ(IPEIR);

2543 2544 2545
			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
2546
			I915_WRITE(IPEIR, ipeir);
2547
			POSTING_READ(IPEIR);
2548 2549 2550
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

2551 2552 2553 2554
			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2555
			I915_WRITE(IPEIR_I965, ipeir);
2556
			POSTING_READ(IPEIR_I965);
2557 2558 2559 2560
		}
	}

	I915_WRITE(EIR, eir);
2561
	POSTING_READ(EIR);
2562 2563 2564 2565 2566 2567 2568 2569 2570 2571
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
2584 2585
void i915_handle_error(struct drm_device *dev, bool wedged,
		       const char *fmt, ...)
2586 2587
{
	struct drm_i915_private *dev_priv = dev->dev_private;
2588 2589
	va_list args;
	char error_msg[80];
2590

2591 2592 2593 2594 2595
	va_start(args, fmt);
	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
	va_end(args);

	i915_capture_error_state(dev, wedged, error_msg);
2596
	i915_report_and_clear_eir(dev);
2597

2598
	if (wedged) {
2599 2600
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);
2601

2602
		/*
2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613
		 * Wakeup waiting processes so that the reset work function
		 * i915_error_work_func doesn't deadlock trying to grab various
		 * locks. By bumping the reset counter first, the woken
		 * processes will see a reset in progress and back off,
		 * releasing their locks and then wait for the reset completion.
		 * We must do this for _all_ gpu waiters that might hold locks
		 * that the reset work needs to acquire.
		 *
		 * Note: The wake_up serves as the required memory barrier to
		 * ensure that the waiters see the updated value of the reset
		 * counter atomic_t.
2614
		 */
2615
		i915_error_wake_up(dev_priv, false);
2616 2617
	}

2618 2619 2620 2621 2622 2623 2624
	/*
	 * Our reset work can grab modeset locks (since it needs to reset the
	 * state of outstanding pagelips). Hence it must not be run on our own
	 * dev-priv->wq work queue for otherwise the flush_work in the pageflip
	 * code will deadlock.
	 */
	schedule_work(&dev_priv->gpu_error.work);
2625 2626
}

2627 2628 2629
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
2630
static int i915_enable_vblank(struct drm_device *dev, int pipe)
2631
{
2632
	struct drm_i915_private *dev_priv = dev->dev_private;
2633
	unsigned long irqflags;
2634

2635
	if (!i915_pipe_enabled(dev, pipe))
2636
		return -EINVAL;
2637

2638
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2639
	if (INTEL_INFO(dev)->gen >= 4)
2640
		i915_enable_pipestat(dev_priv, pipe,
2641
				     PIPE_START_VBLANK_INTERRUPT_STATUS);
2642
	else
2643
		i915_enable_pipestat(dev_priv, pipe,
2644
				     PIPE_VBLANK_INTERRUPT_STATUS);
2645
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2646

2647 2648 2649
	return 0;
}

2650
static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2651
{
2652
	struct drm_i915_private *dev_priv = dev->dev_private;
2653
	unsigned long irqflags;
2654
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2655
						     DE_PIPE_VBLANK(pipe);
2656 2657 2658 2659 2660

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2661
	ironlake_enable_display_irq(dev_priv, bit);
2662 2663 2664 2665 2666
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
2669
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2676
	i915_enable_pipestat(dev_priv, pipe,
2677
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

2683 2684 2685 2686 2687 2688 2689 2690 2691
static int gen8_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2692 2693 2694
	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2695 2696 2697 2698
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	return 0;
}

2699 2700 2701
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
2702
static void i915_disable_vblank(struct drm_device *dev, int pipe)
2703
{
2704
	struct drm_i915_private *dev_priv = dev->dev_private;
2705
	unsigned long irqflags;
2706

2707
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2708
	i915_disable_pipestat(dev_priv, pipe,
2709 2710
			      PIPE_VBLANK_INTERRUPT_STATUS |
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2711 2712 2713
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

2714
static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2715
{
2716
	struct drm_i915_private *dev_priv = dev->dev_private;
2717
	unsigned long irqflags;
2718
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2719
						     DE_PIPE_VBLANK(pipe);
2720 2721

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2722
	ironlake_disable_display_irq(dev_priv, bit);
2723 2724 2725
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
2728
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2732
	i915_disable_pipestat(dev_priv, pipe,
2733
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

2737 2738 2739 2740 2741 2742 2743 2744 2745
static void gen8_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2746 2747 2748
	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2749 2750 2751
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

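/* Hangcheck helpers: inspect per-ring state to judge whether the GPU is
 * still making progress. */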
2752 2753
static struct drm_i915_gem_request *
ring_last_request(struct intel_engine_cs *ring)
2754
{
2755
	return list_entry(ring->request_list.prev,
2756
			  struct drm_i915_gem_request, list);
2757 2758
}

2759
static bool
2760
ring_idle(struct intel_engine_cs *ring)
2761 2762
{
	return (list_empty(&ring->request_list) ||
2763
		i915_gem_request_completed(ring_last_request(ring), false));
}

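/* Decode IPEHR to check whether the ring is blocked on a semaphore wait. */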
2766 2767 2768 2769
static bool
ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
{
	if (INTEL_INFO(dev)->gen >= 8) {
2770
		return (ipehr >> 23) == 0x1c;
2771 2772 2773 2774 2775 2776 2777
	} else {
		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
				 MI_SEMAPHORE_REGISTER);
	}
}

2778
static struct intel_engine_cs *
2779
semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
2780 2781
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2782
	struct intel_engine_cs *signaller;
2783 2784 2785
	int i;

	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2786 2787 2788 2789 2790 2791 2792
		for_each_ring(signaller, dev_priv, i) {
			if (ring == signaller)
				continue;

			if (offset == signaller->semaphore.signal_ggtt[ring->id])
				return signaller;
		}
2793 2794 2795 2796 2797 2798 2799
	} else {
		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;

		for_each_ring(signaller, dev_priv, i) {
			if (ring == signaller)
				continue;

2800
			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
2801 2802 2803 2804
				return signaller;
		}
	}

2805 2806
	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
		  ring->id, ipehr, offset);
2807 2808 2809 2810

	return NULL;
}

2811 2812
static struct intel_engine_cs *
semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2813 2814
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2815
	u32 cmd, ipehr, head;
2816 2817
	u64 offset = 0;
	int i, backwards;
2818 2819

	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2820
	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
2821
		return NULL;
2822

2823 2824 2825
	/*
	 * HEAD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX. But limit it to just 3
2826 2827
	 * or 4 dwords depending on the semaphore wait command size.
	 * Note that we don't care about ACTHD here since that might
2828 2829
	 * point at a batch, and semaphores are always emitted into the
	 * ringbuffer itself.
2830
	 */
2831
	head = I915_READ_HEAD(ring) & HEAD_ADDR;
2832
	backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
2833

2834
	for (i = backwards; i; --i) {
2835 2836 2837 2838 2839
		/*
		 * Be paranoid and presume the hw has gone off into the wild -
		 * our ring is smaller than what the hardware (and hence
		 * HEAD_ADDR) allows. Also handles wrap-around.
		 */
2840
		head &= ring->buffer->size - 1;
2841 2842

		/* This here seems to blow up */
2843
		cmd = ioread32(ring->buffer->virtual_start + head);
2844 2845 2846
		if (cmd == ipehr)
			break;

2847 2848
		head -= 4;
	}
2849

2850 2851
	if (!i)
		return NULL;
2852

2853
	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2854 2855 2856 2857 2858 2859
	if (INTEL_INFO(ring->dev)->gen >= 8) {
		offset = ioread32(ring->buffer->virtual_start + head + 12);
		offset <<= 32;
		offset |= ioread32(ring->buffer->virtual_start + head + 8);
	}
	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
2860 2861
}

2862
static int semaphore_passed(struct intel_engine_cs *ring)
2863 2864
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2865
	struct intel_engine_cs *signaller;
2866
	u32 seqno;
2867

2868
	ring->hangcheck.deadlock++;
2869 2870

	signaller = semaphore_waits_for(ring, &seqno);
2871 2872 2873 2874 2875
	if (signaller == NULL)
		return -1;

	/* Prevent pathological recursion due to driver bugs */
	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2876 2877
		return -1;

2878 2879 2880
	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
		return 1;

2881 2882 2883
	/* cursory check for an unkickable deadlock */
	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
	    semaphore_passed(signaller) < 0)
2884 2885 2886
		return -1;

	return 0;
2887 2888 2889 2890
}

static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
2891
	struct intel_engine_cs *ring;
2892 2893 2894
	int i;

	for_each_ring(ring, dev_priv, i)
2895
		ring->hangcheck.deadlock = 0;
2896 2897
}

2898
static enum intel_ring_hangcheck_action
2899
ring_stuck(struct intel_engine_cs *ring, u64 acthd)
2900 2901 2902
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
2903 2904
	u32 tmp;

2905 2906 2907 2908 2909 2910 2911 2912
	if (acthd != ring->hangcheck.acthd) {
		if (acthd > ring->hangcheck.max_acthd) {
			ring->hangcheck.max_acthd = acthd;
			return HANGCHECK_ACTIVE;
		}

		return HANGCHECK_ACTIVE_LOOP;
	}
2913

2914
	if (IS_GEN2(dev))
2915
		return HANGCHECK_HUNG;
2916 2917 2918 2919 2920 2921 2922

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
	tmp = I915_READ_CTL(ring);
2923
	if (tmp & RING_WAIT) {
2924 2925 2926
		i915_handle_error(dev, false,
				  "Kicking stuck wait on %s",
				  ring->name);
2927
		I915_WRITE_CTL(ring, tmp);
2928
		return HANGCHECK_KICK;
2929 2930 2931 2932 2933
	}

	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(ring)) {
		default:
2934
			return HANGCHECK_HUNG;
2935
		case 1:
2936 2937 2938
			i915_handle_error(dev, false,
					  "Kicking stuck semaphore on %s",
					  ring->name);
2939
			I915_WRITE_CTL(ring, tmp);
2940
			return HANGCHECK_KICK;
2941
		case 0:
2942
			return HANGCHECK_WAIT;
2943
		}
2944
	}
2945

2946
	return HANGCHECK_HUNG;
2947 2948
}

/**
 * This is called when the chip hasn't reported back with completed
2951 2952 2953 2954 2955
 * batchbuffers in a long time. We keep track of per-ring seqno progress and
 * if there is no progress, the hangcheck score for that ring is increased.
 * Further, acthd is inspected to see if the ring is stuck. If it is, we
 * kick the ring. If we see no progress on three subsequent calls
 * we assume the chip is wedged and try to fix it by resetting the chip.
 */
2957
static void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
2960
	struct drm_i915_private *dev_priv = dev->dev_private;
2961
	struct intel_engine_cs *ring;
2962
	int i;
2963
	int busy_count = 0, rings_hung = 0;
2964 2965 2966 2967
	bool stuck[I915_NUM_RINGS] = { 0 };
#define BUSY 1
#define KICK 5
#define HUNG 20
2968

2969
	if (!i915.enable_hangcheck)
2970 2971
		return;

2972
	for_each_ring(ring, dev_priv, i) {
2973 2974
		u64 acthd;
		u32 seqno;
2975
		bool busy = true;
2976

2977 2978
		semaphore_clear_deadlocks(dev_priv);

2979 2980
		seqno = ring->get_seqno(ring, false);
		acthd = intel_ring_get_active_head(ring);
2981

2982
		if (ring->hangcheck.seqno == seqno) {
2983
			if (ring_idle(ring)) {
2984 2985
				ring->hangcheck.action = HANGCHECK_IDLE;

2986 2987
				if (waitqueue_active(&ring->irq_queue)) {
					/* Issue a wake-up to catch stuck h/w. */
2988
					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
2989 2990 2991 2992 2993 2994
						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
								  ring->name);
						else
							DRM_INFO("Fake missed irq on %s\n",
								 ring->name);
2995 2996 2997 2998
						wake_up_all(&ring->irq_queue);
					}
					/* Safeguard against driver failure */
					ring->hangcheck.score += BUSY;
2999 3000
				} else
					busy = false;
3001
			} else {
3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016
				/* We always increment the hangcheck score
				 * if the ring is busy and still processing
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring, if this
				 * ring is in a legitimate wait for another
				 * ring. In that case the waiting ring is a
				 * victim and we want to be sure we catch the
				 * right culprit. Then every time we do kick
				 * the ring, add a small increment to the
				 * score so that we can catch a batch that is
				 * being repeatedly kicked and so responsible
				 * for stalling the machine.
				 */
3017 3018 3019 3020
				ring->hangcheck.action = ring_stuck(ring,
								    acthd);

				switch (ring->hangcheck.action) {
3021
				case HANGCHECK_IDLE:
3022 3023
				case HANGCHECK_WAIT:
				case HANGCHECK_ACTIVE:
3024 3025
					break;
				case HANGCHECK_ACTIVE_LOOP:
3026
					ring->hangcheck.score += BUSY;
3027
					break;
3028
				case HANGCHECK_KICK:
3029
					ring->hangcheck.score += KICK;
3030
					break;
3031
				case HANGCHECK_HUNG:
3032
					ring->hangcheck.score += HUNG;
3033 3034 3035
					stuck[i] = true;
					break;
				}
3036
			}
3037
		} else {
3038 3039
			ring->hangcheck.action = HANGCHECK_ACTIVE;

3040 3041 3042 3043 3044
			/* Gradually reduce the count so that we catch DoS
			 * attempts across multiple batches.
			 */
			if (ring->hangcheck.score > 0)
				ring->hangcheck.score--;
3045 3046

			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
3047 3048
		}

3049 3050
		ring->hangcheck.seqno = seqno;
		ring->hangcheck.acthd = acthd;
3051
		busy_count += busy;
3052
	}
3053

3054
	for_each_ring(ring, dev_priv, i) {
3055
		if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3056 3057 3058
			DRM_INFO("%s on %s\n",
				 stuck[i] ? "stuck" : "no progress",
				 ring->name);
3059
			rings_hung++;
3060 3061 3062
		}
	}

3063
	if (rings_hung)
3064
		return i915_handle_error(dev, true, "Ring hung");

3066 3067 3068
	if (busy_count)
		/* Reset timer in case the chip hangs without another request
		 * being added */
3069 3070 3071 3072 3073 3074
		i915_queue_hangcheck(dev);
}

void i915_queue_hangcheck(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
3075 3076
	struct timer_list *timer = &dev_priv->gpu_error.hangcheck_timer;

3077
	if (!i915.enable_hangcheck)
3078 3079
		return;

3080
	/* Don't continually defer the hangcheck, but make sure it is active */
3081 3082 3083 3084
	if (timer_pending(timer))
		return;
	mod_timer(timer,
		  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}

3087
static void ibx_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

3094
	GEN5_IRQ_RESET(SDE);
3095 3096 3097

	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
		I915_WRITE(SERR_INT, 0xffffffff);
}
3099

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * This function needs to be called before interrupts are enabled.
 */
static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}

3120
static void gen5_gt_irq_reset(struct drm_device *dev)
3121 3122 3123
{
	struct drm_i915_private *dev_priv = dev->dev_private;

3124
	GEN5_IRQ_RESET(GT);
	if (INTEL_INFO(dev)->gen >= 6)
3126
		GEN5_IRQ_RESET(GEN6_PM);
3127 3128
}

/* drm_dma.h hooks
*/
static void ironlake_irq_reset(struct drm_device *dev)
3132
{
3133
	struct drm_i915_private *dev_priv = dev->dev_private;
3134

3135
	I915_WRITE(HWSTAM, 0xffffffff);
3136

3137
	GEN5_IRQ_RESET(DE);
3138 3139
	if (IS_GEN7(dev))
		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3140

3141
	gen5_gt_irq_reset(dev);
3142

3143
	ibx_irq_reset(dev);
3144
}
3145

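/* Reset all VLV/CHV display side interrupt state: hotplug, pipestat and
 * the VLV_ IMR/IER/IIR registers. */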
3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	GEN5_IRQ_RESET(VLV_);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
3161
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

3169
	gen5_gt_irq_reset(dev);

3171
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);

3173
	vlv_display_irq_reset(dev_priv);
}

3176 3177 3178 3179 3180 3181 3182 3183
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	GEN8_IRQ_RESET_NDX(GT, 0);
	GEN8_IRQ_RESET_NDX(GT, 1);
	GEN8_IRQ_RESET_NDX(GT, 2);
	GEN8_IRQ_RESET_NDX(GT, 3);
}

static void gen8_irq_reset(struct drm_device *dev)
3185 3186 3187 3188 3189 3190 3191
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

3192
	gen8_gt_irq_reset(dev_priv);
3193

3194
	for_each_pipe(dev_priv, pipe)
3195 3196
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
3197
			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3198

3199 3200 3201
	GEN5_IRQ_RESET(GEN8_DE_PORT_);
	GEN5_IRQ_RESET(GEN8_DE_MISC_);
	GEN5_IRQ_RESET(GEN8_PCU_);
3202

3203
	ibx_irq_reset(dev);
3204
}
3205

3206 3207
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
{
3208
	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3209

3210
	spin_lock_irq(&dev_priv->irq_lock);
3211
	GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
3212
			  ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3213
	GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
3214
			  ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3215
	spin_unlock_irq(&dev_priv->irq_lock);
3216 3217
}

3218 3219 3220 3221 3222 3223 3224
static void cherryview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

3225
	gen8_gt_irq_reset(dev_priv);
3226 3227 3228 3229 3230

	GEN5_IRQ_RESET(GEN8_PCU_);

	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);

3231
	vlv_display_irq_reset(dev_priv);
3232 3233
}

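/*
 * Unmask the PCH hotplug interrupts for every encoder whose HPD pin is
 * marked HPD_ENABLED, and program a 2ms short pulse duration on all ports.
 */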
3234
static void ibx_hpd_irq_setup(struct drm_device *dev)
3235
{
3236
	struct drm_i915_private *dev_priv = dev->dev_private;
3237
	struct intel_encoder *intel_encoder;
3238
	u32 hotplug_irqs, hotplug, enabled_irqs = 0;
3239 3240

	if (HAS_PCH_IBX(dev)) {
3241
		hotplug_irqs = SDE_HOTPLUG_MASK;
3242
		for_each_intel_encoder(dev, intel_encoder)
3243
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3244
				enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
3245
	} else {
3246
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3247
		for_each_intel_encoder(dev, intel_encoder)
3248
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3249
				enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
3250
	}
3251

3252
	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3253 3254 3255 3256 3257 3258 3259

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 *
	 * This register is the same on all known PCH chips.
	 */
3260 3261 3262 3263 3264 3265 3266 3267
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void ibx_irq_postinstall(struct drm_device *dev)
{
3270
	struct drm_i915_private *dev_priv = dev->dev_private;
3271
	u32 mask;
3272

	if (HAS_PCH_NOP(dev))
		return;

3276
	if (HAS_PCH_IBX(dev))
3277
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3278
	else
3279
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3280

3281
	GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
	I915_WRITE(SDEIMR, ~mask);
}

3285 3286 3287 3288 3289 3290 3291 3292
static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
3293
	if (HAS_L3_DPF(dev)) {
3294
		/* L3 parity interrupt is always unmasked. */
3295 3296
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
		gt_irqs |= GT_PARITY_ERROR(dev);
3297 3298 3299 3300 3301 3302 3303 3304 3305 3306
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev)) {
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
			   ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3308 3309

	if (INTEL_INFO(dev)->gen >= 6) {
3310
		pm_irqs |= dev_priv->pm_rps_events;
3311 3312 3313 3314

		if (HAS_VEBOX(dev))
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;

3315
		dev_priv->pm_irq_mask = 0xffffffff;
		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3317 3318 3319
	}
}

3320
static int ironlake_irq_postinstall(struct drm_device *dev)
3321
{
3322
	struct drm_i915_private *dev_priv = dev->dev_private;
3323 3324 3325 3326 3327 3328
	u32 display_mask, extra_mask;

	if (INTEL_INFO(dev)->gen >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
				DE_PLANEB_FLIP_DONE_IVB |
3329
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3330
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3331
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
3332 3333 3334
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3335 3336 3337
				DE_AUX_CHANNEL_A |
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
				DE_POISON);
3338 3339
		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
				DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
3340
	}
3341

3342
	dev_priv->irq_mask = ~display_mask;
3343

3344 3345
	I915_WRITE(HWSTAM, 0xeffe);

P

P
3349

3350
	gen5_gt_irq_postinstall(dev);
3351

P
3353

3354
	if (IS_IRONLAKE_M(dev)) {
3355 3356 3357
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
3358 3359
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
3360
		spin_lock_irq(&dev_priv->irq_lock);
3361
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3362
		spin_unlock_irq(&dev_priv->irq_lock);
3363 3364
	}

3365 3366 3367
	return 0;
}

3368 3369 3370 3371
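/* Enable the VLV/CHV display interrupts; caller holds dev_priv->irq_lock. */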
static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 iir_mask;
3372
	enum pipe pipe;
3373 3374 3375 3376

	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;

3377 3378
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3379 3380 3381 3382 3383
	POSTING_READ(PIPESTAT(PIPE_A));

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

3384 3385 3386
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		      i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3387 3388 3389 3390

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3391 3392
	if (IS_CHERRYVIEW(dev_priv))
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3393 3394 3395 3396 3397
	dev_priv->irq_mask &= ~iir_mask;

	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	POSTING_READ(VLV_IMR);
}

static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
{
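	/*
	 * Tear down in the opposite order: mask the pipe/port event bits
	 * first, then disable the pipestat events and clear out whatever
	 * status bits are still pending.
	 */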
	u32 pipestat_mask;
	u32 iir_mask;
	enum pipe pipe;

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
	if (IS_CHERRYVIEW(dev_priv))
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;

	dev_priv->irq_mask |= iir_mask;
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	POSTING_READ(VLV_IIR);

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_disable_pipestat(dev_priv, pipe, pipestat_mask);

	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
	POSTING_READ(PIPESTAT(PIPE_A));
}

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv))
		valleyview_display_irqs_install(dev_priv);
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		valleyview_display_irqs_uninstall(dev_priv);
}

static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
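	/*
	 * Start with everything masked and all stale hotplug/IIR state
	 * cleared; the display interrupts proper are only installed below if
	 * they have already been requested via
	 * valleyview_enable_display_irqs().
	 */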
	dev_priv->irq_mask = ~0;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	POSTING_READ(VLV_IMR);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_install(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	vlv_display_irq_postinstall(dev_priv);

	gen5_gt_irq_postinstall(dev);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}

static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
		};

	dev_priv->pm_irq_mask = 0xffffffff;
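	/*
	 * GT0: render + blitter, GT1: both video decode engines,
	 * GT2: PM/RPS events (left fully masked here until RPS interrupts
	 * are enabled), GT3: video enhancement engine.
	 */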
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, dev_priv->pm_rps_events);
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
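	/*
	 * Per-pipe display interrupts (flip done, CRC, faults, vblank and
	 * FIFO underrun) are only programmed for pipes whose power well is
	 * currently enabled; the AUX channel interrupts live in the DE port
	 * register instead.
	 */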
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	uint32_t de_pipe_enables;
	int pipe;
	u32 aux_en = GEN8_AUX_CHANNEL_A;

	if (IS_GEN9(dev_priv)) {
		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		aux_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;
	} else
		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);

	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~aux_en, aux_en);
}

static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	ibx_irq_postinstall(dev);

	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	vlv_display_irq_postinstall(dev_priv);

	gen8_gt_irq_postinstall(dev_priv);

	I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static void gen8_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	gen8_irq_reset(dev);
}

static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
{
	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_uninstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	vlv_display_irq_reset(dev_priv);

	dev_priv->irq_mask = 0;
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(VLV_MASTER_IER, 0);

	gen5_gt_irq_reset(dev);

	I915_WRITE(HWSTAM, 0xffffffff);

	vlv_display_irq_uninstall(dev_priv);
}

static void cherryview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	vlv_display_irq_uninstall(dev_priv);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	ironlake_irq_reset(dev);
}

static void i8xx_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		goto check_page_flip;

	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}

static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff)
				I915_WRITE(reg, pipe_stats[pipe]);
		}
		spin_unlock(&dev_priv->irq_lock);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i8xx_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}

static void i8xx_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

static void i915_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		goto check_page_flip;

	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	return ret;
}

static void i915_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}

static void i915_hpd_irq_setup(struct drm_device *dev)
{
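	/*
	 * Rebuild PORT_HOTPLUG_EN from the per-pin software state: only pins
	 * currently marked HPD_ENABLED get their enable bit set.
	 */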
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later.  So just do it once.
		*/
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

static void intel_hpd_irq_reenable_work(struct work_struct *work)
{
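	/*
	 * Delayed work that re-enables HPD pins previously marked
	 * HPD_DISABLED (typically after an interrupt storm), restores the
	 * affected connectors' polling mode and reprograms the hotplug
	 * hardware.
	 */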
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     hotplug_reenable_work.work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	int i;

	intel_runtime_pm_get(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 connector->name);
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_runtime_pm_put(dev_priv);
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not setup the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
			  intel_hpd_irq_reenable_work);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	if (IS_GEN2(dev_priv)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
	}

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev_priv)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev_priv)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

/**
 * intel_hpd_init - initializes and enables hpd support
 * @dev_priv: i915 device instance
 *
 * This function enables the hotplug support. It requires that interrupts have
 * already been enabled with intel_irq_init_hw(). From this point on hotplug and
 * poll requests can run concurrently with other code, so locking rules must be
 * obeyed.
 *
 * This is a separate step from interrupt enabling to simplify the locking rules
 * in the driver load and resume code.
 */
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
		if (intel_connector->mst_port)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(dev_priv->dev);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
}