/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		POSTING_READ(reg);
	}
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] &= ~mask;
		I915_WRITE(reg, dev_priv->pipestat[pipe]);
		POSTING_READ(reg);
	}
}

/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	/* FIXME: opregion/asle for VLV */
	if (IS_VALLEYVIEW(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		i915_enable_pipestat(dev_priv, 1,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);
		if (INTEL_INFO(dev)->gen >= 4)
			i915_enable_pipestat(dev_priv, 0,
					     PIPE_LEGACY_BLC_EVENT_ENABLE);
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				"pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
			     int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(pipe));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
			      int *max_error,
			      struct timeval *vblank_time,
			      unsigned flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= dev_priv->num_pipe) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->hot_plug)
			encoder->hot_plug(encoder);

	mutex_unlock(&mode_config->mutex);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}

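/* Note: on Ironlake a numerically lower delay value means a higher render
 * P-state, so max_delay is the smallest value cur_delay can reach; busy_up
 * events step the delay down and busy_down events step it back up.
 */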
static void i915_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay = dev_priv->cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->cur_delay != dev_priv->max_delay)
			new_delay = dev_priv->cur_delay - 1;
		if (new_delay < dev_priv->max_delay)
			new_delay = dev_priv->max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->cur_delay != dev_priv->min_delay)
			new_delay = dev_priv->cur_delay + 1;
		if (new_delay > dev_priv->min_delay)
			new_delay = dev_priv->min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->cur_delay = new_delay;

	return;
}

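/* A ring has signalled a completed request: trace it, wake any waiters,
 * and push the hangcheck timer back out if hangcheck is enabled.
 */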
static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring));

	wake_up_all(&ring->irq_queue);
	if (i915_enable_hangcheck) {
		dev_priv->hangcheck_count = 0;
		mod_timer(&dev_priv->hangcheck_timer,
			  jiffies +
			  msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
	}
}

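/* Process-context half of the GEN6 RPS (render P-state) interrupt: pick up
 * the PM IIR bits stashed by the interrupt handler under rps_lock and step
 * the GPU frequency one bin up or down.
 */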
static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps_work);
	u8 new_delay = dev_priv->cur_delay;
	u32 pm_iir, pm_imr;

	spin_lock_irq(&dev_priv->rps_lock);
	pm_iir = dev_priv->pm_iir;
	dev_priv->pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps_lock);

	if (!pm_iir)
		return;

	mutex_lock(&dev_priv->dev->struct_mutex);
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (dev_priv->cur_delay != dev_priv->max_delay)
			new_delay = dev_priv->cur_delay + 1;
		if (new_delay > dev_priv->max_delay)
			new_delay = dev_priv->max_delay;
	} else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
		gen6_gt_force_wake_get(dev_priv);
		if (dev_priv->cur_delay != dev_priv->min_delay)
			new_delay = dev_priv->cur_delay - 1;
		if (new_delay < dev_priv->min_delay) {
			new_delay = dev_priv->min_delay;
			I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
				   I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
				   ((new_delay << 16) & 0x3f0000));
		} else {
			/* Make sure we continue to get down interrupts
			 * until we hit the minimum frequency */
			I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
				   I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
		}
		gen6_gt_force_wake_put(dev_priv);
	}

	gen6_set_rps(dev_priv->dev, new_delay);
	dev_priv->cur_delay = new_delay;

	/*
	 * rps_lock not held here because clearing is non-destructive. There is
	 * an *extremely* unlikely race with gen6_rps_enable() that is prevented
	 * by holding struct_mutex for the duration of the write.
	 */
	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}
}

static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
{
	unsigned long flags;

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * displays a case where we've unsafely cleared
	 * dev_priv->pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it displays a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by rps_work.
	 */

	spin_lock_irqsave(&dev_priv->rps_lock, flags);
	WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
	dev_priv->pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock_irqrestore(&dev_priv->rps_lock, flags);

	queue_work(dev_priv->wq, &dev_priv->rps_work);
}

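/* Valleyview funnels display (VLV_IIR), GT (GTIIR) and PM (GEN6_PMIIR)
 * interrupts through a single handler, looping until all three IIRs read
 * back zero.
 */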
static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];
	u32 vblank_status;
	int vblank = 0;
	bool blc_event = false;

	atomic_inc(&dev_priv->irq_received);

	vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS |
		PIPE_VBLANK_INTERRUPT_STATUS;

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) {
			drm_handle_vblank(dev, 0);
			vblank++;
			intel_finish_page_flip(dev, 0);
		}

		if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) {
			drm_handle_vblank(dev, 1);
			vblank++;
			intel_finish_page_flip(dev, 1);
		}

		for_each_pipe(pipe)
			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}

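/* Decode and log the south display engine (PCH) interrupt sources. */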
static void pch_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_AUDIO_POWER_MASK)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
				 SDE_AUDIO_POWER_SHIFT);

	if (pch_iir & SDE_GMBUS)
		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}

static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pch_iir = I915_READ(SDEIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && pm_iir == 0)
		goto done;

	ret = IRQ_HANDLED;

	snb_gt_irq_handler(dev, dev_priv, gt_iir);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_gse_intr(dev);

	if (de_iir & DE_PLANEA_FLIP_DONE_IVB) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE_IVB) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	if (de_iir & DE_PLANEC_FLIP_DONE_IVB) {
		intel_prepare_page_flip(dev, 2);
		intel_finish_page_flip_plane(dev, 2);
	}

	if (de_iir & DE_PIPEA_VBLANK_IVB)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK_IVB)
		drm_handle_vblank(dev, 1);

	if (de_iir & DE_PIPEC_VBLANK_IVB)
		drm_handle_vblank(dev, 2);

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT_IVB) {
		if (pch_iir & SDE_HOTPLUG_MASK_CPT)
			queue_work(dev_priv->wq, &dev_priv->hotplug_work);
		pch_irq_handler(dev, pch_iir);
	}

	if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);

	/* clear the PCH hotplug event before clearing the CPU irq */
	I915_WRITE(SDEIIR, pch_iir);
	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

done:
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);

	return ret;
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
	u32 hotplug_mask;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pch_iir = I915_READ(SDEIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
	    (!IS_GEN6(dev) || pm_iir == 0))
		goto done;

	if (HAS_PCH_CPT(dev))
		hotplug_mask = SDE_HOTPLUG_MASK_CPT;
	else
		hotplug_mask = SDE_HOTPLUG_MASK;

	ret = IRQ_HANDLED;

	if (IS_GEN5(dev))
		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	else
		snb_gt_irq_handler(dev, dev_priv, gt_iir);

	if (de_iir & DE_GSE)
		intel_opregion_gse_intr(dev);

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		if (pch_iir & hotplug_mask)
			queue_work(dev_priv->wq, &dev_priv->hotplug_work);
		pch_irq_handler(dev, pch_iir);
	}

	if (de_iir & DE_PCU_EVENT) {
		I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
		i915_handle_rps_change(dev);
	}

	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);

	/* clear the PCH hotplug event before clearing the CPU irq */
	I915_WRITE(SDEIIR, pch_iir);
	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

done:
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);

	return ret;
}

/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    error_work);
	struct drm_device *dev = dev_priv->dev;
	char *error_event[] = { "ERROR=1", NULL };
	char *reset_event[] = { "RESET=1", NULL };
	char *reset_done_event[] = { "ERROR=0", NULL };

	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

	if (atomic_read(&dev_priv->mm.wedged)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
		if (!i915_reset(dev)) {
			atomic_set(&dev_priv->mm.wedged, 0);
			kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
		}
		complete_all(&dev_priv->error_completion);
	}
}

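/* GPU error state capture is only useful when debugfs is there to read it
 * back out, so the capture path compiles away otherwise (see the #else
 * stub at the end of this block).
 */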
#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src)
{
	struct drm_i915_error_object *dst;
	int page, page_count;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	page_count = src->base.size / PAGE_SIZE;

	dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = src->gtt_offset;
	for (page = 0; page < page_count; page++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
		    src->has_global_gtt_mapping) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else {
			void *s;

			drm_clflush_pages(&src->pages[page], 1);

			s = kmap_atomic(src->pages[page]);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&src->pages[page], 1);
		}
		local_irq_restore(flags);

		dst->pages[page] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = page_count;
	dst->gtt_offset = src->gtt_offset;

	return dst;

unwind:
	while (page--)
		kfree(dst->pages[page]);
	kfree(dst);
	return NULL;
}

static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

void
i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		kfree(error->ring[i].requests);
	}

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error);
}

static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->seqno = obj->last_rendering_seqno;
	err->gtt_offset = obj->gtt_offset;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (obj->pin_count > 0)
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, mm_list) {
		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, gtt_list) {
		if (obj->pin_count == 0)
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;
	}
}

static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return NULL;

	seqno = ring->get_seqno(ring);
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->ring != ring)
			continue;

		if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
			continue;

		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
			continue;

		/* We need to copy these to an anonymous buffer as the simplest
		 * method to avoid being overwritten by userspace.
		 */
		return i915_error_object_create(dev_priv, obj);
	}

	return NULL;
}

static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS) {
			error->instdone1 = I915_READ(INSTDONE1);
			error->bbaddr = I915_READ64(BB_ADDR);
		}
	} else {
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}

static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	int i, count;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_ring_buffer *ring = &dev_priv->ring[i];

		if (ring->obj == NULL)
			continue;

		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kmalloc(count*sizeof(struct drm_i915_error_request),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error.  Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int i, pipe;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error = dev_priv->first_error;
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
	if (error)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
		 dev->primary->index);

	kref_init(&error->ref);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);

	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
	else if (IS_VALLEYVIEW(dev))
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
	else if (IS_GEN2(dev))
		error->ier = I915_READ16(IER);
	else
		error->ier = I915_READ(IER);

	for_each_pipe(pipe)
		error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	/* Record buffers on the active and pinned lists. */
	error->active_bo = NULL;
	error->pinned_bo = NULL;

	i = 0;
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
		i++;
	error->active_bo_count = i;
	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
		if (obj->pin_count)
			i++;
	error->pinned_bo_count = i - error->active_bo_count;

	if (i) {
		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
					   GFP_ATOMIC);
		if (error->active_bo)
			error->pinned_bo =
				error->active_bo + error->active_bo_count;
	}

	if (error->active_bo)
		error->active_bo_count =
			capture_active_bo(error->active_bo,
					  error->active_bo_count,
					  &dev_priv->mm.active_list);

	if (error->pinned_bo)
		error->pinned_bo_count =
			capture_pinned_bo(error->pinned_bo,
					  error->pinned_bo_count,
					  &dev_priv->mm.gtt_list);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	if (dev_priv->first_error == NULL) {
		dev_priv->first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	if (error)
		i915_error_state_free(&error->ref);
}

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error = dev_priv->first_error;
	dev_priv->first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
#else
#define i915_capture_error_state(x)
#endif

static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 eir = I915_READ(EIR);
	int pipe;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  INSTDONE: 0x%08x\n", I915_READ(INSTDONE));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		INIT_COMPLETION(dev_priv->error_completion);
		atomic_set(&dev_priv->mm.wedged, 1);

		/*
		 * Wakeup waiting processes so they don't hang
		 */
		wake_up_all(&dev_priv->ring[RCS].irq_queue);
		if (HAS_BSD(dev))
			wake_up_all(&dev_priv->ring[VCS].irq_queue);
		if (HAS_BLT(dev))
			wake_up_all(&dev_priv->ring[BCS].irq_queue);
	}

	queue_work(dev_priv->wq, &dev_priv->error_work);
}

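/* Detect a missed pageflip-complete interrupt: if the display base register
 * already points at the object queued for the flip, assume the interrupt
 * got lost and run the pending flip by hand.
 */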
static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL || work->pending || !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					obj->gtt_offset;
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);

	/* maintain vblank delivery even in deep C-states */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv,
				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 dpfl, imr;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dpfl = I915_READ(VLV_DPFLIPSTAT);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0) {
		dpfl |= PIPEA_VBLANK_INT_EN;
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	} else {
		dpfl |= PIPEB_VBLANK_INT_EN;
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	}
	I915_WRITE(VLV_DPFLIPSTAT, dpfl);
	I915_WRITE(VLV_IMR, imr);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv,
				     DE_PIPEA_VBLANK_IVB << (pipe * 5));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 dpfl, imr;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dpfl = I915_READ(VLV_DPFLIPSTAT);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0) {
		dpfl &= ~PIPEA_VBLANK_INT_EN;
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	} else {
		dpfl &= ~PIPEB_VBLANK_INT_EN;
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	}
	I915_WRITE(VLV_IMR, imr);
	I915_WRITE(VLV_DPFLIPSTAT, dpfl);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

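/* Hangcheck helpers: ring_last_seqno() returns the seqno of the most
 * recently submitted request on the ring.
 */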
static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}

static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
{
	/* We don't check whether the ring even exists before calling this
	 * function. Hence check whether it's initialized. */
	if (ring->obj == NULL)
		return true;

	if (list_empty(&ring->request_list) ||
	    i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
		/* Issue a wake-up to catch stuck h/w. */
		if (waitqueue_active(&ring->irq_queue)) {
			DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
				  ring->name);
			wake_up_all(&ring->irq_queue);
			*err = true;
		}
		return true;
	}
	return false;
}

static bool kick_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		DRM_ERROR("Kicking stuck wait on %s\n",
			  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return true;
	}
	return false;
}

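/* Called when hangcheck has fired without the GPU making progress; on the
 * second idle period declare the chip hung, though kicking a ring stuck on
 * a WAIT_FOR_EVENT may still unwedge it on gen3+.
 */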
static bool i915_hangcheck_hung(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (dev_priv->hangcheck_count++ > 1) {
		DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
		i915_handle_error(dev, true);

		if (!IS_GEN2(dev)) {
			/* Is the chip hanging on a WAIT_FOR_EVENT?
			 * If so we can simply poke the RB_WAIT bit
			 * and break the hang. This should work on
			 * all but the second generation chipsets.
			 */
			if (kick_ring(&dev_priv->ring[RCS]))
				return false;

			if (HAS_BSD(dev) && kick_ring(&dev_priv->ring[VCS]))
				return false;

			if (HAS_BLT(dev) && kick_ring(&dev_priv->ring[BCS]))
				return false;
		}

		return true;
	}

	return false;
}

/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. The first time this is called we simply record
 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
 * again, we assume the chip is wedged and try to fix it.
 */
void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t acthd, instdone, instdone1, acthd_bsd, acthd_blt;
	bool err = false;

	if (!i915_enable_hangcheck)
		return;

	/* If all work is done then ACTHD clearly hasn't advanced. */
	if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
	    i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
	    i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) {
		if (err) {
			if (i915_hangcheck_hung(dev))
				return;

			goto repeat;
		}

		dev_priv->hangcheck_count = 0;
		return;
	}

	if (INTEL_INFO(dev)->gen < 4) {
		instdone = I915_READ(INSTDONE);
		instdone1 = 0;
	} else {
		instdone = I915_READ(INSTDONE_I965);
		instdone1 = I915_READ(INSTDONE1);
	}
	acthd = intel_ring_get_active_head(&dev_priv->ring[RCS]);
	acthd_bsd = HAS_BSD(dev) ?
		intel_ring_get_active_head(&dev_priv->ring[VCS]) : 0;
	acthd_blt = HAS_BLT(dev) ?
		intel_ring_get_active_head(&dev_priv->ring[BCS]) : 0;

	if (dev_priv->last_acthd == acthd &&
	    dev_priv->last_acthd_bsd == acthd_bsd &&
	    dev_priv->last_acthd_blt == acthd_blt &&
	    dev_priv->last_instdone == instdone &&
	    dev_priv->last_instdone1 == instdone1) {
		if (i915_hangcheck_hung(dev))
			return;
	} else {
		dev_priv->hangcheck_count = 0;

		dev_priv->last_acthd = acthd;
		dev_priv->last_acthd_bsd = acthd_bsd;
		dev_priv->last_acthd_blt = acthd_blt;
		dev_priv->last_instdone = instdone;
		dev_priv->last_instdone1 = instdone1;
	}

repeat:
	/* Reset the timer in case the chip hangs without another request being added */
	mod_timer(&dev_priv->hangcheck_timer,
		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}

/* drm_dma.h hooks
*/
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	POSTING_READ(SDEIER);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	I915_WRITE(DPINVGTT, 0xff);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

/*
 * Enable digital hotplug on the PCH, and configure the DP short pulse
 * duration to 2ms (which is the minimum in the Display Port spec)
 *
 * This register is the same on all known PCH chips.
 */

static void ironlake_enable_pch_hotplug(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32	hotplug;

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

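/* The preinstall hook has already masked and acked everything; unmask just
 * the interrupts we actually want here.
 */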
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable kind of interrupts always enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
	u32 render_irqs;
	u32 hotplug_mask;

	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate irqs */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~0;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	if (IS_GEN6(dev))
		render_irqs =
			GT_USER_INTERRUPT |
			GEN6_BSD_USER_INTERRUPT |
			GEN6_BLITTER_USER_INTERRUPT;
	else
		render_irqs =
			GT_USER_INTERRUPT |
			GT_PIPE_NOTIFY |
			GT_BSD_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	if (HAS_PCH_CPT(dev)) {
		hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
				SDE_PORTB_HOTPLUG_CPT |
				SDE_PORTC_HOTPLUG_CPT |
				SDE_PORTD_HOTPLUG_CPT);
	} else {
		hotplug_mask = (SDE_CRT_HOTPLUG |
				SDE_PORTB_HOTPLUG |
				SDE_PORTC_HOTPLUG |
				SDE_PORTD_HOTPLUG |
				SDE_AUX_MASK);
	}

	dev_priv->pch_irq_mask = ~hotplug_mask;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
	I915_WRITE(SDEIER, hotplug_mask);
	POSTING_READ(SDEIER);

	ironlake_enable_pch_hotplug(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Clear & enable PCU event interrupts */
		I915_WRITE(DEIIR, DE_PCU_EVENT);
		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
	}

	return 0;
}

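/* Same flow as ironlake_irq_postinstall, using the IVB bit layout. */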
static int ivybridge_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable the kinds of interrupts that are always enabled */
	u32 display_mask =
		DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
		DE_PLANEC_FLIP_DONE_IVB |
		DE_PLANEB_FLIP_DONE_IVB |
		DE_PLANEA_FLIP_DONE_IVB;
	u32 render_irqs;
	u32 hotplug_mask;

	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate irqs */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER,
		   display_mask |
		   DE_PIPEC_VBLANK_IVB |
		   DE_PIPEB_VBLANK_IVB |
		   DE_PIPEA_VBLANK_IVB);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~0;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
		GEN6_BLITTER_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
			SDE_PORTB_HOTPLUG_CPT |
			SDE_PORTC_HOTPLUG_CPT |
			SDE_PORTD_HOTPLUG_CPT);
	dev_priv->pch_irq_mask = ~hotplug_mask;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
	I915_WRITE(SDEIER, hotplug_mask);
	POSTING_READ(SDEIER);

	ironlake_enable_pch_hotplug(dev);

	return 0;
}

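/*
 * Enable the display port and pipe vblank interrupts, apply the MSI
 * workaround, then turn on the GT interrupts and the master enable.
 */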
static int valleyview_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 render_irqs;
	u32 enable_mask;
	u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
	u16 msid;

	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
	enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	dev_priv->irq_mask = ~enable_mask;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	/* Hack for broken MSIs on VLV */
	pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
	pci_read_config_word(dev->pdev, 0x98, &msid);
	msid &= 0xff; /* mask out delivery bits */
	msid |= (1<<14);
	pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);

	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(PIPESTAT(0), 0xffff);
	I915_WRITE(PIPESTAT(1), 0xffff);
	POSTING_READ(VLV_IER);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	render_irqs = GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
		GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		GT_GEN6_BLT_USER_INTERRUPT |
		GT_GEN6_BSD_USER_INTERRUPT |
		GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
		GT_PIPE_NOTIFY |
		GT_RENDER_CS_ERROR_INTERRUPT |
		GT_SYNC_STATUS |
		GT_USER_INTERRUPT;

	dev_priv->gt_irq_mask = ~render_irqs;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, 0);
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
#if 0 /* FIXME: check register definitions; some have moved */
	/* Note HDMI and DP share bits */
	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMID_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
		hotplug_en |= SDVOC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
		hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
		hotplug_en |= CRT_HOTPLUG_INT_EN;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
	}
#endif

	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

	return 0;
}

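/* Mask and ack everything, returning to the preinstall state. */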
static void valleyview_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

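/* Mask and ack all display, GT and PCH interrupts on teardown. */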
static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
}

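/* Gen2 exposes 16-bit IMR/IER/IIR, hence the I915_WRITE16 variants. */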
static void i8xx_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

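/* Unmask and enable the gen2 interrupts we always want on. */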
static int i8xx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	return 0;
}

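/*
 * Gen2 interrupt handler.  Flip-pending bits are excluded from the
 * loop condition via flip_mask until the matching vblank completes
 * the flip, so a queued flip cannot keep the handler spinning.
 */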
static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	unsigned long irqflags;
	int irq_received;
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    drm_handle_vblank(dev, 0)) {
			if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
				intel_prepare_page_flip(dev, 0);
				intel_finish_page_flip(dev, 0);
				flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
			}
		}

		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    drm_handle_vblank(dev, 1)) {
			if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
				intel_prepare_page_flip(dev, 1);
				intel_finish_page_flip(dev, 1);
				flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
			}
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}

static void i8xx_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

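/* Gen3 preinstall: disable hotplug detection and mask everything. */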
static void i915_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

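/*
 * Gen3 postinstall: unmask the always-wanted interrupts, enable
 * hotplug detection where the hardware supports it, and turn on ASLE.
 */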
static int i915_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	if (I915_HAS_HOTPLUG(dev)) {
		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMID_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
			hotplug_en |= CRT_HOTPLUG_INT_EN;
			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
		}

		/* Ignore TV since it's buggy */

		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}

	intel_opregion_enable_asle(dev);

	return 0;
}

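/*
 * Gen3 interrupt handler.  On mobile parts the plane/pipe pairing is
 * swapped, hence the plane = !plane fixup when matching flip-pending
 * bits to vblank events.
 */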
static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	u32 flip[2] = {
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
	};
	int pipe, ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
				  hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			POSTING_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (IS_MOBILE(dev))
				plane = !plane;
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    drm_handle_vblank(dev, pipe)) {
				if (iir & flip[plane]) {
					intel_prepare_page_flip(dev, plane);
					intel_finish_page_flip(dev, pipe);
					flip_mask &= ~flip[plane];
				}
			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i915_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

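/*
 * Gen4 postinstall: additionally enables the BSD ring interrupt on
 * G4X and programs EMR for page table and memory refresh errors.
 */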
static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	if (I915_HAS_HOTPLUG(dev)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	if (I915_HAS_HOTPLUG(dev)) {
		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

		/* Note HDMI and DP share bits */
		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMID_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
			hotplug_en |= CRT_HOTPLUG_INT_EN;

			/* Programming the CRT detection parameters tends
			   to generate a spurious hotplug event about three
			   seconds later.  So just do it once.
			*/
			if (IS_G4X(dev))
				hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
		}

		/* Ignore TV since it's buggy */

		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}

	intel_opregion_enable_asle(dev);

	return 0;
}

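/*
 * Gen4 interrupt handler.  Page flips are prepared from the IIR bits
 * and completed from the vblank pipestat bits, with a stall check for
 * flips whose completion was missed.
 */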
static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = iir != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
				  hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
			intel_prepare_page_flip(dev, 0);

		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
			intel_prepare_page_flip(dev, 1);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    drm_handle_vblank(dev, pipe)) {
				i915_pageflip_stall_check(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}


		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

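/*
 * Select the IRQ and vblank hooks for this chip: VLV and IVB get
 * their own handlers, other PCH-split parts share the Ironlake path,
 * and everything older uses the gen2/3/4 handlers below.
 */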
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->error_work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev) ||
	    IS_VALLEYVIEW(dev)) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	else
		dev->driver->get_vblank_timestamp = NULL;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
	} else if (IS_IVYBRIDGE(dev)) {
		/* Share pre & uninstall handlers with ILK/SNB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ivybridge_enable_vblank;
		dev->driver->disable_vblank = ivybridge_disable_vblank;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			/* IIR "flip pending" means done if this bit is set */
			I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}