/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr &= ~mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr |= mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
						interrupt_mask);
		return;
	}

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}
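
/*
 * Worked example (illustrative values, not from the original source):
 * calling ilk_update_gt_irq() with interrupt_mask = 0x3 and
 * enabled_irq_mask = 0x1 unmasks bit 0, masks bit 1 and leaves every
 * other GTIMR bit untouched.  The two wrappers above are just the common
 * cases, e.g.:
 *
 *	ilk_enable_gt_irq(dev_priv, GT_RENDER_USER_INTERRUPT);
 */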

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
						     interrupt_mask);
		return;
	}

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

		/* Change the state _after_ we've read out the current one. */
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (!was_enabled &&
		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
				      pipe_name(pipe));
		}
	}
}

static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable)
		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
	else
		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled &&
	    (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
						 interrupt_mask);
		return;
	}

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		uint32_t tmp = I915_READ(SERR_INT);
		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

		/* Change the state _after_ we've read out the current one. */
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (!was_enabled &&
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
				      transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts for
 * the other pipes, due to the fact that there's just one interrupt mask/enable
 * bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN8(dev))
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}
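
/*
 * Illustrative use (hypothetical caller): disable reporting around an
 * operation that is expected to underrun, then restore whatever state
 * was in place before:
 *
 *	bool old = intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
 *	...reconfigure the pipe...
 *	intel_set_cpu_fifo_underrun_reporting(dev, pipe, old);
 */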

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's just
 * one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}


void
i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
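
/*
 * Worked example (illustrative): PIPESTAT keeps the enable bits in the
 * high word and the matching status bits sixteen positions lower, e.g.
 * PIPE_VBLANK_INTERRUPT_ENABLE is bit 17 and PIPE_VBLANK_INTERRUPT_STATUS
 * is bit 1.  That is why i915_enable_pipestat() writes
 * mask | (mask >> 16): one register write unmasks the event and acks any
 * stale status bit.
 */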

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				"pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
	} else {
		enum transcoder cpu_transcoder =
			intel_pipe_to_cpu_transcoder(dev_priv, pipe);
		u32 htotal;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;

		vbl_start *= htotal;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
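
/*
 * Worked example (illustrative numbers): PIPEFRAME supplies the high 16
 * bits of the frame counter and PIPEFRAMEPIXEL the low 8 bits plus the
 * pixel counter.  With high1 = 0x0001, low = 0xff and a pixel count at
 * or past vbl_start, the function above returns
 * ((0x0001 << 8) | 0xff) + 1 = 0x200, i.e. the cooked-up counter ticks
 * over at vblank start instead of at the start of active.
 */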

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))

static bool intel_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t status;
	int reg;

	if (IS_VALLEYVIEW(dev)) {
		status = pipe == PIPE_A ?
			I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
			I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

		reg = VLV_ISR;
	} else if (IS_GEN2(dev)) {
		status = pipe == PIPE_A ?
			I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
			I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

		reg = ISR;
	} else if (INTEL_INFO(dev)->gen < 5) {
		status = pipe == PIPE_A ?
			I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
			I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

		reg = ISR;
	} else if (INTEL_INFO(dev)->gen < 7) {
		status = pipe == PIPE_A ?
			DE_PIPEA_VBLANK :
			DE_PIPEB_VBLANK;

		reg = DEISR;
	} else {
		switch (pipe) {
		default:
		case PIPE_A:
			status = DE_PIPEA_VBLANK_IVB;
			break;
		case PIPE_B:
			status = DE_PIPEB_VBLANK_IVB;
			break;
		case PIPE_C:
			status = DE_PIPEC_VBLANK_IVB;
			break;
		}

		reg = DEISR;
	}

	if (IS_GEN2(dev))
		return __raw_i915_read16(dev_priv, reg) & status;
	else
		return __raw_i915_read32(dev_priv, reg) & status;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
			     int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	
	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		if (IS_GEN2(dev))
			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
		else
			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

		/*
		 * The scanline counter increments at the leading edge
		 * of hsync, ie. it completely misses the active portion
		 * of the line. Fix up the counter at both edges of vblank
		 * to get a more accurate picture whether we're in vblank
		 * or not.
		 */
		in_vbl = intel_pipe_in_vblank_locked(dev, pipe);
		if ((in_vbl && position == vbl_start - 1) ||
		    (!in_vbl && position == vbl_end - 1))
			position = (position + 1) % vtotal;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}
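
/*
 * Worked example (illustrative numbers): with vtotal = 806,
 * vbl_start = 768 and vbl_end = 806, scanline 770 lies inside vblank and
 * is reported as 770 - 806 = -36, counting up to 0 at vbl_end, while
 * scanline 100 lies outside vblank and is reported as
 * 100 + 806 - 806 = 100, counting up from vbl_end.
 */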

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
			      int *max_error,
			      struct timeval *vblank_time,
			      unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring);

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;
		new_delay = dev_priv->rps.cur_delay + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
		else
			new_delay = dev_priv->rps.min_delay;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		new_delay = dev_priv->rps.cur_delay + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_delay;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_delay, dev_priv->rps.max_delay);
	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
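
/*
 * Worked example (illustrative): consecutive up-threshold interrupts
 * make last_adj grow 1, 2, 4, ..., so cur_delay ramps in ever larger
 * steps while the GPU stays busy; a down-timeout event resets the step
 * to 0 and a down-threshold event flips it negative.  The clamp_t()
 * above keeps the result inside [min_delay, max_delay] even if sysfs
 * moved the limits while the work item was pending.
 */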


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	unsigned long flags;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			ret = IRQ_HANDLED;
			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[RCS]);
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[BCS]);
			I915_WRITE(GEN8_GT_IIR(0), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & GEN8_GT_VCS1_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VCS]);
			I915_WRITE(GEN8_GT_IIR(1), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VECS]);
			I915_WRITE(GEN8_GT_IIR(3), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	return ret;
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 const u32 *hpd)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	bool storm_detected = false;

	if (!hotplug_trigger)
		return;

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {

		WARN(((hpd[i] & hotplug_trigger) &&
		      dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
		     "Received HPD interrupt although disabled\n");

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	schedule_work(&dev_priv->hotplug_work);
}
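
/*
 * Worked example (illustrative): with HPD_STORM_DETECT_PERIOD = 1000 and
 * HPD_STORM_THRESHOLD = 5, more than five interrupts on one pin within a
 * second mark that pin HPD_MARK_DISABLED and drop its event bit;
 * i915_hotplug_work_func() then switches the connector to polling until
 * the reenable timer fires.
 */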

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif

static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & GEN6_PM_RPS_EVENTS) {
		spin_lock(&dev_priv->irq_lock);
		dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
		snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
		spin_unlock(&dev_priv->irq_lock);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
			i915_handle_error(dev_priv->dev, false);
		}
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);
		}

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

			if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
				dp_aux_irq_handler(dev);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
}

static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
								  false))
				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
						 pipe_name(pipe));
		}

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev))
				ivb_pipe_crc_irq_handler(dev, pipe);
			else
				hsw_pipe_crc_irq_handler(dev, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}

static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			drm_handle_vblank(dev, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
						 pipe_name(pipe));

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev);
}

static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe i;

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

	for_each_pipe(i) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(i)))
			drm_handle_vblank(dev, i);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) {
			intel_prepare_page_flip(dev, i);
			intel_finish_page_flip_plane(dev, i);
		}
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	intel_uncore_check_errors(dev);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		if (INTEL_INFO(dev)->gen >= 6)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (INTEL_INFO(dev)->gen >= 7)
			ivb_display_irq_handler(dev, de_iir);
		else
			ilk_display_irq_handler(dev, de_iir);
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			gen6_rps_irq_handler(dev_priv, pm_iir);
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
		}
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}

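/* Top-level gen8 interrupt handler: latch and disable the master
 * control register, dispatch GT, DE misc/port, per-pipe and PCH
 * sources, then re-enable the master interrupt. */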
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl;
	irqreturn_t ret = IRQ_NONE;
	uint32_t tmp = 0;
	enum pipe pipe;

	atomic_inc(&dev_priv->irq_received);

	master_ctl = I915_READ(GEN8_MASTER_IRQ);
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
	if (!master_ctl)
		return IRQ_NONE;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		tmp = I915_READ(GEN8_DE_MISC_IIR);
		if (tmp & GEN8_DE_MISC_GSE)
			intel_opregion_asle_intr(dev);
		else if (tmp)
			DRM_ERROR("Unexpected DE Misc interrupt\n");
		else
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");

		if (tmp) {
			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
			ret = IRQ_HANDLED;
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		tmp = I915_READ(GEN8_DE_PORT_IIR);
		if (tmp & GEN8_AUX_CHANNEL_A)
			dp_aux_irq_handler(dev);
		else if (tmp)
			DRM_ERROR("Unexpected DE Port interrupt\n");
		else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");

		if (tmp) {
			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
			ret = IRQ_HANDLED;
		}
	}

	for_each_pipe(pipe) {
		uint32_t pipe_iir;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (pipe_iir & GEN8_PIPE_VBLANK)
			drm_handle_vblank(dev, pipe);

		if (pipe_iir & GEN8_PIPE_FLIP_DONE) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}

		if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev, pipe);

		if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
								  false))
				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
						 pipe_name(pipe));
		}

		if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
				  pipe_name(pipe),
				  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
		}

		if (pipe_iir) {
			ret = IRQ_HANDLED;
			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
		} else
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
	}

	if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		if (pch_iir) {
			I915_WRITE(SDEIIR, pch_iir);
			ret = IRQ_HANDLED;
		}
	}

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return ret;
}

static void i915_error_wake_up(struct drm_i915_private *dev_priv,
			       bool reset_completed)
{
	struct intel_ring_buffer *ring;
	int i;

	/*
	 * Notify all waiters for GPU completion events that reset state has
	 * been changed, and that they need to restart their wait after
	 * checking for potential errors (and bail out to drop locks if there is
	 * a gpu reset pending so that i915_error_work_func can acquire them).
	 */

	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
	for_each_ring(ring, dev_priv, i)
		wake_up_all(&ring->irq_queue);

	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
	wake_up_all(&dev_priv->pending_flip_queue);

	/*
	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
	 * reset state is cleared.
	 */
	if (reset_completed)
		wake_up_all(&dev_priv->gpu_error.reset_queue);
}

/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
						    work);
	drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
						    gpu_error);
	struct drm_device *dev = dev_priv->dev;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	int ret;

	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
				   reset_event);

		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
		ret = i915_reset(dev);

		intel_display_handle_reset(dev);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before the counter increment.
			 */
			smp_mb__before_atomic_inc();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

			kobject_uevent_env(&dev->primary->kdev->kobj,
					   KOBJ_CHANGE, reset_done_event);
		} else {
			atomic_set_mask(I915_WEDGED, &error->reset_counter);
		}

		/*
		 * Note: The wake_up also serves as a memory barrier so that
		 * waiters see the updated value of the reset counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, true);
	}
}

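/* Dump the sticky EIR error state to the log, then write it back to
 * clear it; any bits that refuse to clear are masked off in EMR. */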
static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset work function
		 * i915_error_work_func doesn't deadlock trying to grab various
		 * locks. By bumping the reset counter first, the woken
		 * processes will see a reset in progress and back off,
		 * releasing their locks and then wait for the reset completion.
		 * We must do this for _all_ gpu waiters that might hold locks
		 * that the reset work needs to acquire.
		 *
		 * Note: The wake_up serves as the required memory barrier to
		 * ensure that the waiters see the updated value of the reset
		 * counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, false);
	}

	/*
	 * Our reset work can grab modeset locks (since it needs to reset the
	 * state of outstanding pageflips). Hence it must not be run on our own
	 * dev-priv->wq work queue for otherwise the flush_work in the pageflip
	 * code will deadlock.
	 */
	schedule_work(&dev_priv->gpu_error.work);
}

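/* Heuristic check for a pageflip whose completion interrupt went
 * missing: if the display base register already points at the new
 * object, assume the flip happened and complete it from vblank. */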
static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL ||
	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
	    !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					i915_gem_obj_ggtt_offset(obj);
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);

	/* maintain vblank delivery even in deep C-states */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	imr = I915_READ(VLV_IMR);
	if (pipe == PIPE_A)
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

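/* On gen8 the vblank interrupt is controlled through the per-pipe DE
 * IMR rather than through PIPESTAT. */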
static int gen8_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	imr = I915_READ(VLV_IMR);
	if (pipe == PIPE_A)
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void gen8_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

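/* Returns the seqno of the most recently submitted request on @ring. */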
static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}

static bool
ring_idle(struct intel_ring_buffer *ring, u32 seqno)
{
	return (list_empty(&ring->request_list) ||
		i915_seqno_passed(seqno, ring_last_seqno(ring)));
}

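/* If @ring appears to be blocked on a MI_SEMAPHORE_MBOX wait, scan
 * backwards from ACTHD to find the wait command and return the ring
 * being waited upon, storing the awaited seqno in @seqno. */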
static struct intel_ring_buffer *
semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 cmd, ipehr, acthd, acthd_min;

	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
	if ((ipehr & ~(0x3 << 16)) !=
	    (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
		return NULL;

	/* ACTHD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX.
	 */
	acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
	acthd_min = max((int)acthd - 3 * 4, 0);
	do {
		cmd = ioread32(ring->virtual_start + acthd);
		if (cmd == ipehr)
			break;

		acthd -= 4;
		if (acthd < acthd_min)
			return NULL;
	} while (1);

	*seqno = ioread32(ring->virtual_start+acthd+4)+1;
	return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
}

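/* Returns -1 on a suspected semaphore deadlock, otherwise whether the
 * signalling ring has already passed the awaited seqno. */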
static int semaphore_passed(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_ring_buffer *signaller;
	u32 seqno, ctl;

	ring->hangcheck.deadlock = true;

	signaller = semaphore_waits_for(ring, &seqno);
	if (signaller == NULL || signaller->hangcheck.deadlock)
		return -1;

	/* cursory check for an unkickable deadlock */
	ctl = I915_READ_CTL(signaller);
	if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
		return -1;

	return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
}

static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		ring->hangcheck.deadlock = false;
}

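/* Classify why a busy ring has made no progress: kick a stuck
 * WAIT_FOR_EVENT or semaphore wait where possible, otherwise report
 * the ring as hung. */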
static enum intel_ring_hangcheck_action
ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	if (ring->hangcheck.acthd != acthd)
		return HANGCHECK_ACTIVE;

	if (IS_GEN2(dev))
		return HANGCHECK_HUNG;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
	tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		DRM_ERROR("Kicking stuck wait on %s\n",
			  ring->name);
		i915_handle_error(dev, false);
		I915_WRITE_CTL(ring, tmp);
		return HANGCHECK_KICK;
	}

	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(ring)) {
		default:
			return HANGCHECK_HUNG;
		case 1:
			DRM_ERROR("Kicking stuck semaphore on %s\n",
				  ring->name);
			i915_handle_error(dev, false);
			I915_WRITE_CTL(ring, tmp);
			return HANGCHECK_KICK;
		case 0:
			return HANGCHECK_WAIT;
		}
	}

	return HANGCHECK_HUNG;
}

/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track of seqno progress per
 * ring and if there is no progress, the hangcheck score for that ring
 * is increased. Further, acthd is inspected to see if the ring is
 * stuck. In the stuck case we kick the ring. If we see no progress on
 * three subsequent calls we assume the chip is wedged and try to fix
 * it by resetting the chip.
 */
static void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;
	int busy_count = 0, rings_hung = 0;
	bool stuck[I915_NUM_RINGS] = { 0 };
#define BUSY 1
#define KICK 5
#define HUNG 20
#define FIRE 30

	if (!i915_enable_hangcheck)
		return;

	for_each_ring(ring, dev_priv, i) {
		u32 seqno, acthd;
		bool busy = true;

		semaphore_clear_deadlocks(dev_priv);

		seqno = ring->get_seqno(ring, false);
		acthd = intel_ring_get_active_head(ring);

		if (ring->hangcheck.seqno == seqno) {
			if (ring_idle(ring, seqno)) {
				ring->hangcheck.action = HANGCHECK_IDLE;

				if (waitqueue_active(&ring->irq_queue)) {
					/* Issue a wake-up to catch stuck h/w. */
					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
								  ring->name);
						else
							DRM_INFO("Fake missed irq on %s\n",
								 ring->name);
						wake_up_all(&ring->irq_queue);
					}
					/* Safeguard against driver failure */
					ring->hangcheck.score += BUSY;
				} else
					busy = false;
			} else {
				/* We always increment the hangcheck score
				 * if the ring is busy and still processing
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring is if this
				 * ring is in a legitimate wait for another
				 * ring. In that case the waiting ring is a
				 * victim and we want to be sure we catch the
				 * right culprit. Then every time we do kick
				 * the ring, add a small increment to the
				 * score so that we can catch a batch that is
				 * being repeatedly kicked and so responsible
				 * for stalling the machine.
				 */
				ring->hangcheck.action = ring_stuck(ring,
								    acthd);

				switch (ring->hangcheck.action) {
				case HANGCHECK_IDLE:
				case HANGCHECK_WAIT:
					break;
				case HANGCHECK_ACTIVE:
					ring->hangcheck.score += BUSY;
					break;
				case HANGCHECK_KICK:
					ring->hangcheck.score += KICK;
					break;
				case HANGCHECK_HUNG:
					ring->hangcheck.score += HUNG;
					stuck[i] = true;
					break;
				}
			}
		} else {
			ring->hangcheck.action = HANGCHECK_ACTIVE;

			/* Gradually reduce the count so that we catch DoS
			 * attempts across multiple batches.
			 */
			if (ring->hangcheck.score > 0)
				ring->hangcheck.score--;
		}

		ring->hangcheck.seqno = seqno;
		ring->hangcheck.acthd = acthd;
		busy_count += busy;
	}

	for_each_ring(ring, dev_priv, i) {
		if (ring->hangcheck.score > FIRE) {
			DRM_INFO("%s on %s\n",
				 stuck[i] ? "stuck" : "no progress",
				 ring->name);
			rings_hung++;
		}
	}

	if (rings_hung)
		return i915_handle_error(dev, true);

	if (busy_count)
		/* Reset the timer in case the chip hangs without another
		 * request being added */
		i915_queue_hangcheck(dev);
}

void i915_queue_hangcheck(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (!i915_enable_hangcheck)
		return;

	mod_timer(&dev_priv->gpu_error.hangcheck_timer,
		  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}

static void ibx_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	/*
	 * SDEIER is also touched by the interrupt handler to work around missed
	 * PCH interrupts. Hence we can't update it after the interrupt handler
	 * is enabled - instead we unconditionally enable all PCH interrupt
	 * sources here, but then only unmask them as needed with SDEIMR.
	 */
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}

static void gen5_gt_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	if (INTEL_INFO(dev)->gen >= 6) {
		/* and PM */
		I915_WRITE(GEN6_PMIMR, 0xffffffff);
		I915_WRITE(GEN6_PMIER, 0x0);
		POSTING_READ(GEN6_PMIER);
	}
}

/* drm_dma.h hooks
 */
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(HWSTAM, 0xeffe);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	gen5_gt_irq_preinstall(dev);

	ibx_irq_preinstall(dev);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	gen5_gt_irq_preinstall(dev);

	I915_WRITE(DPINVGTT, 0xff);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

static void gen8_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	/* IIR can theoretically queue up two events. Be paranoid */
#define GEN8_IRQ_INIT_NDX(type, which) do { \
		I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
		POSTING_READ(GEN8_##type##_IMR(which)); \
		I915_WRITE(GEN8_##type##_IER(which), 0); \
		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
		POSTING_READ(GEN8_##type##_IIR(which)); \
		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	} while (0)
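
/* As an illustration, GEN8_IRQ_INIT_NDX(GT, 0) masks everything in
 * GEN8_GT_IMR(0), zeroes GEN8_GT_IER(0) and clears GEN8_GT_IIR(0)
 * twice, covering the two events the IIR can queue up. */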

#define GEN8_IRQ_INIT(type) do { \
		I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
		POSTING_READ(GEN8_##type##_IMR); \
		I915_WRITE(GEN8_##type##_IER, 0); \
		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
		POSTING_READ(GEN8_##type##_IIR); \
		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
	} while (0)

	GEN8_IRQ_INIT_NDX(GT, 0);
	GEN8_IRQ_INIT_NDX(GT, 1);
	GEN8_IRQ_INIT_NDX(GT, 2);
	GEN8_IRQ_INIT_NDX(GT, 3);

	for_each_pipe(pipe) {
		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe);
	}

	GEN8_IRQ_INIT(DE_PORT);
	GEN8_IRQ_INIT(DE_MISC);
	GEN8_IRQ_INIT(PCU);
#undef GEN8_IRQ_INIT
#undef GEN8_IRQ_INIT_NDX

	POSTING_READ(GEN8_PCU_IIR);
}

static void ibx_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 hotplug_irqs, hotplug, enabled_irqs = 0;

	if (HAS_PCH_IBX(dev)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 *
	 * This register is the same on all known PCH chips.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void ibx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 mask;

	if (HAS_PCH_NOP(dev))
		return;

	if (HAS_PCH_IBX(dev)) {
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
		       SDE_TRANSA_FIFO_UNDER | SDE_POISON;
	} else {
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;

		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
	}

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, ~mask);
}

static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
		gt_irqs |= GT_PARITY_ERROR(dev);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev)) {
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
			   ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	I915_WRITE(GTIER, gt_irqs);
	POSTING_READ(GTIER);

	if (INTEL_INFO(dev)->gen >= 6) {
		pm_irqs |= GEN6_PM_RPS_EVENTS;

		if (HAS_VEBOX(dev))
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;

		dev_priv->pm_irq_mask = 0xffffffff;
		I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		I915_WRITE(GEN6_PMIER, pm_irqs);
		POSTING_READ(GEN6_PMIER);
	}
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
	unsigned long irqflags;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 display_mask, extra_mask;

	if (INTEL_INFO(dev)->gen >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
				DE_PLANEB_FLIP_DONE_IVB |
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
				DE_ERR_INT_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB);

		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A |
				DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
				DE_POISON);
		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
	}

	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate irqs */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask | extra_mask);
	POSTING_READ(DEIER);

	gen5_gt_irq_postinstall(dev);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}

	return 0;
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV |
		PIPE_CRC_DONE_ENABLE;
	unsigned long irqflags;

	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	/*
	 * Leave vblank interrupts masked initially. enable/disable will
	 * toggle them based on usage.
	 */
	dev_priv->irq_mask = (~enable_mask) |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(PIPESTAT(0), 0xffff);
	I915_WRITE(PIPESTAT(1), 0xffff);
	POSTING_READ(VLV_IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
	i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	gen5_gt_irq_postinstall(dev);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}

static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	int i;

	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
		};

	for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) {
		u32 tmp = I915_READ(GEN8_GT_IIR(i));
		if (tmp)
			DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
				  i, tmp);
		I915_WRITE(GEN8_GT_IMR(i), ~gt_interrupts[i]);
		I915_WRITE(GEN8_GT_IER(i), gt_interrupts[i]);
	}
	POSTING_READ(GEN8_GT_IER(0));
}

static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE |
		GEN8_PIPE_CDCLK_CRC_DONE |
		GEN8_PIPE_FIFO_UNDERRUN |
		GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK;
	int pipe;

	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

	for_each_pipe(pipe) {
		u32 tmp = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (tmp)
			DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
				  pipe, tmp);
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables);
	}
	POSTING_READ(GEN8_DE_PIPE_ISR(0));

	I915_WRITE(GEN8_DE_PORT_IMR, ~GEN8_AUX_CHANNEL_A);
	I915_WRITE(GEN8_DE_PORT_IER, GEN8_AUX_CHANNEL_A);
	POSTING_READ(GEN8_DE_PORT_IER);
}

static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	ibx_irq_postinstall(dev);

	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static void gen8_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(GEN8_MASTER_IRQ, 0);

#define GEN8_IRQ_FINI_NDX(type, which) do { \
		I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
		I915_WRITE(GEN8_##type##_IER(which), 0); \
		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	} while (0)

#define GEN8_IRQ_FINI(type) do { \
		I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
		I915_WRITE(GEN8_##type##_IER, 0); \
		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
	} while (0)

	GEN8_IRQ_FINI_NDX(GT, 0);
	GEN8_IRQ_FINI_NDX(GT, 1);
	GEN8_IRQ_FINI_NDX(GT, 2);
	GEN8_IRQ_FINI_NDX(GT, 3);

	for_each_pipe(pipe) {
		GEN8_IRQ_FINI_NDX(DE_PIPE, pipe);
	}

	GEN8_IRQ_FINI(DE_PORT);
	GEN8_IRQ_FINI(DE_MISC);
	GEN8_IRQ_FINI(PCU);
#undef GEN8_IRQ_FINI
#undef GEN8_IRQ_FINI_NDX

	POSTING_READ(GEN8_PCU_IIR);
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	if (IS_GEN7(dev))
		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	if (HAS_PCH_NOP(dev))
		return;

	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
}

static void i8xx_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!drm_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}

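/* Interrupt handler for gen2, which uses the 16-bit IIR/IMR/IER
 * registers. */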
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	unsigned long irqflags;
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i8xx_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}

static void i8xx_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

static void i915_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	unsigned long irqflags;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!drm_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}

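/* Legacy interrupt handler: keep re-reading IIR and handling pipe,
 * hotplug and render events until no unmasked bits remain set. */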
3356
static irqreturn_t i915_irq_handler(int irq, void *arg)
3357 3358 3359
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3360
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
3361
	unsigned long irqflags;
3362 3363 3364 3365
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;
3366 3367 3368 3369

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);
3370 3371
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
3372
		bool blc_event = false;
3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

3387
			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			POSTING_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
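			/* On gen3 parts with FBC the plane<->pipe mapping
			 * is swapped, so translate pipe to plane before the
			 * vblank/flip bookkeeping below.
			 */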
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

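/*
 * Undo i915_irq_postinstall: quiesce the hotplug logic, mask every
 * interrupt source, then ack whatever status bits are still pending so
 * that a later install starts from a clean slate.
 */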
static void i915_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

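/*
 * Quiesce the hardware before the IRQ line is requested: disable the
 * hotplug, pipe and IER sources and mask everything in IMR so nothing
 * can fire until postinstall programs the real masks.
 */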
static void i965_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

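/*
 * Program the gen4 (i965/g4x) interrupt masks: ASLE, display port,
 * pipe events and render errors are always unmasked; flip-pending
 * interrupts are unmasked in IMR but left out of IER for now.  Error
 * detection is configured through EMR, with the reserved instruction
 * error bit kept masked.
 */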
static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 error_mask;
	unsigned long irqflags;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}

static void i915_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
		/* Programming the CRT detection parameters tends to generate
		 * a spurious hotplug event about three seconds later, so
		 * just do it once.
		 */
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}

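/*
 * Gen4 variant of the IIR loop.  It additionally dispatches the BSD
 * ring, GMBUS events and (on g4x) DP AUX transactions, and uses the
 * 1:1 plane/pipe mapping for vblank handling.
 */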
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = (iir & ~flip_mask) != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
								  HOTPLUG_INT_STATUS_G4X :
								  HOTPLUG_INT_STATUS_I915);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger,
					      IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915);

			if (IS_G4X(dev) &&
			    (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X))
				dp_aux_irq_handler(dev);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

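/*
 * Mirror of i965_irq_postinstall: mask and disable everything, then
 * ack any pipe status and IIR bits that may still be pending.
 */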
static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

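/*
 * Timer callback that re-arms hotplug pins disabled by storm
 * detection: pins marked HPD_DISABLED go back to HPD_ENABLED, their
 * connectors are switched from polling back to HPD, and the platform
 * hpd_irq_setup hook reprograms the hardware.
 */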
static void i915_reenable_hotplug_timer_func(unsigned long data)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 drm_get_connector_name(connector));
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

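/*
 * One-time IRQ-related setup: work items, the hangcheck and hotplug
 * re-enable timers, the vblank counter hooks, and the per-platform
 * IRQ vtable.
 */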
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
		    (unsigned long) dev_priv);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	if (IS_GEN2(dev)) {
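		/* gen2 has no hardware frame counter */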
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
	}

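	/* Hook up the platform-specific handler and vblank callbacks. */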
	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_GEN8(dev)) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_preinstall;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

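/*
 * (Re-)initialize hotplug state: mark every pin enabled, restore each
 * connector's polling mode (falling back to HPD where the hardware
 * supports it), and let the platform hook reprogram the hotplug
 * registers under the irq lock.
 */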
void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	unsigned long irqflags;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/* Disable interrupts so we can allow Package C8+. */
void hsw_pc8_disable_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	dev_priv->pc8.regsave.deimr = I915_READ(DEIMR);
	dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
	dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
	dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
	dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);

	ironlake_disable_display_irq(dev_priv, 0xffffffff);
	ibx_disable_display_interrupt(dev_priv, 0xffffffff);
	ilk_disable_gt_irq(dev_priv, 0xffffffff);
	snb_disable_pm_irq(dev_priv, 0xffffffff);

	dev_priv->pc8.irqs_disabled = true;

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/* Restore interrupts so we can recover from Package C8+. */
void hsw_pc8_restore_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t val;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	val = I915_READ(DEIMR);
	WARN(val != 0xffffffff, "DEIMR is 0x%08x\n", val);

	val = I915_READ(SDEIMR);
	WARN(val != 0xffffffff, "SDEIMR is 0x%08x\n", val);

	val = I915_READ(GTIMR);
	WARN(val != 0xffffffff, "GTIMR is 0x%08x\n", val);

	val = I915_READ(GEN6_PMIMR);
	WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val);

	dev_priv->pc8.irqs_disabled = false;

	ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
	ibx_enable_display_interrupt(dev_priv, ~dev_priv->pc8.regsave.sdeimr);
	ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
	snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
	I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}