/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>
#include <drm/drm_irq.h>

#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"

#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_pm.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG,
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC,
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
	[HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
	[HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
	[HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG,
};

static const u32 hpd_gen12[HPD_NUM_PINS] = {
	[HPD_PORT_D] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
	[HPD_PORT_E] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
	[HPD_PORT_F] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
	[HPD_PORT_G] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG,
	[HPD_PORT_H] = GEN12_TC5_HOTPLUG | GEN12_TBT5_HOTPLUG,
	[HPD_PORT_I] = GEN12_TC6_HOTPLUG | GEN12_TBT6_HOTPLUG,
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
	[HPD_PORT_C] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
	[HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
	[HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
	[HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
};

static const u32 hpd_tgp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(PORT_C),
	[HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
	[HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
	[HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
	[HPD_PORT_G] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
	[HPD_PORT_H] = SDE_TC_HOTPLUG_ICP(PORT_TC5),
	[HPD_PORT_I] = SDE_TC_HOTPLUG_ICP(PORT_TC6),
};

static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	drm_crtc_handle_vblank(&crtc->base);
}

void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
		    i915_reg_t iir, i915_reg_t ier)
{
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	intel_uncore_write(uncore, ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}

void gen2_irq_reset(struct intel_uncore *uncore)
{
	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IMR);

	intel_uncore_write16(uncore, GEN2_IER, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}

static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(GEN2_IIR), val);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

void gen3_irq_init(struct intel_uncore *uncore,
		   i915_reg_t imr, u32 imr_val,
		   i915_reg_t ier, u32 ier_val,
		   i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}

void gen2_irq_init(struct intel_uncore *uncore,
		   u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     u32 mask,
				     u32 bits)
{
	u32 val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To keep concurrent read-modify-write
 * cycles from interfering, these bits are protected by a spinlock.
 * Since this function is usually not called from a context where
 * the lock is held already, this function acquires the lock itself.
 * A non-locking version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    u32 interrupt_mask,
			    u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
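
/*
 * Worked example of the masking convention above (invented values, for
 * illustration only): calling ilk_update_display_irq() with
 * interrupt_mask = 0x6 and enabled_irq_mask = 0x2 yields
 *   new_val = (old & ~0x6) | (~0x2 & 0x6) = (old & ~0x6) | 0x4,
 * i.e. bit 1 is unmasked (enabled), bit 2 is masked (disabled), and all
 * other DEIMR bits keep their current state.
 */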

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;
	u32 old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 u32 interrupt_mask,
			 u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  u32 interrupt_mask,
				  u32 enabled_irq_mask)
{
	u32 sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (INTEL_GEN(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	drm_WARN_ONCE(&dev_priv->drm,
		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}
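
/*
 * Note on the layout assumed by i915_pipestat_enable_mask() (summarizing
 * the code above rather than an authoritative register description):
 * PIPESTAT keeps each status bit in the low half of the register and its
 * enable bit 16 positions higher, hence the initial status_mask << 16;
 * the VLV/CHV sprite flip-done and FIFO underrun bits are then fixed up
 * because their enable bits do not follow that pattern.
 */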

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle)
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!i915_has_asle(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 i915_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	unsigned long irqflags;

	/*
	 * On i965gm TV output the frame counter only works up to
	 * the point when we enable the TV encoder. After that the
	 * frame counter ceases to work and reads zero. We need a
	 * vblank wait before enabling the TV encoder and so we
	 * have to enable vblank interrupts while the frame counter
	 * is still in a working state. However the core vblank code
	 * does not like us returning non-zero frame counter values
	 * when we've told it that we don't have a working frame
	 * counter. Thus we must stop non-zero values leaking out.
	 */
	if (!vblank->max_vblank_count)
		return 0;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = intel_de_read_fw(dev_priv, low_frame);
		high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
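
/*
 * Worked example of the cook-up above (invented numbers): for a mode
 * with htotal = 2200, hsync_start = 2008 and crtc_vblank_start = 1080,
 * the adjusted vbl_start is 1080 * 2200 - (2200 - 2008) = 2375808
 * pixels, so once the pixel counter passes that point the returned
 * value is bumped by one even though the hardware frame counter itself
 * only increments at the start of the next active period.
 */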

u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/*
 * On certain encoders on certain platforms, pipe
 * scanline register will not work to get the scanline,
 * since the timings are driven from the PORT or issues
 * with scanline register updates.
 * This function will use Framestamp and current
 * timestamp registers to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads. We make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);

		scan_post_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
					clock), 1000 * htotal);
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}

/*
 * Use intel_de_read_fw() below: these are fast reads of the display
 * block, so no forcewake etc. is needed.
 */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return -1;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN(dev_priv, 2))
		position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank.  So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem.  We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
				     bool in_vblank_irq,
				     int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_device *dev = _crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	enum pipe pipe = crtc->pipe;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;
	bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
		IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
		mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;

	if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
		drm_dbg(&dev_priv->drm,
			"trying to get scanoutpos for disabled "
			"pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (use_scanline_counter) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;
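
	/*
	 * Worked example with invented 1080p-like timings (vbl_start = 1080,
	 * vbl_end = vtotal = 1125): a raw scanline of 1100 (inside vblank)
	 * becomes 1100 - 1125 = -25, counting up towards 0 at vbl_end, while
	 * a raw scanline of 100 (active) stays at 100.
	 */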

	if (use_scanline_counter) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}

bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
				     ktime_t *vblank_time, bool in_vblank_irq)
{
	return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
		crtc, max_error, vblank_time, in_vblank_irq,
		i915_get_crtc_scanoutpos);
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

/**
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is likely to go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	struct intel_gt *gt = &dev_priv->gt;
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (drm_WARN_ON_ONCE(&dev_priv->drm,
				     slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
	spin_lock_irq(&gt->irq_lock);
	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&gt->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_C:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
	case HPD_PORT_D:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
	case HPD_PORT_E:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
	case HPD_PORT_F:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
	default:
		return false;
	}
}

static bool gen12_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_D:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
	case HPD_PORT_E:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
	case HPD_PORT_F:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
	case HPD_PORT_G:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
	case HPD_PORT_H:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC5);
	case HPD_PORT_I:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC6);
	default:
		return false;
	}
}

static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_A);
	case HPD_PORT_B:
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_B);
	case HPD_PORT_C:
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_C);
	default:
		return false;
	}
}

static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_C:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
	case HPD_PORT_D:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
	case HPD_PORT_E:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
	case HPD_PORT_F:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
	default:
		return false;
	}
}

static bool tgp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_D:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
	case HPD_PORT_E:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
	case HPD_PORT_F:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
	case HPD_PORT_G:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
	case HPD_PORT_H:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC5);
	case HPD_PORT_I:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC6);
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
			       u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
1181
			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
1182
{
1183
	enum hpd_pin pin;
1184

1185 1186
	BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);

1187 1188
	for_each_hpd_pin(pin) {
		if ((hpd[pin] & hotplug_trigger) == 0)
1189
			continue;
1190

1191
		*pin_mask |= BIT(pin);
1192

1193
		if (long_pulse_detect(pin, dig_hotplug_reg))
1194
			*long_mask |= BIT(pin);
1195 1196
	}

1197 1198 1199
	drm_dbg(&dev_priv->drm,
		"hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
		hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
1200 1201 1202

}

static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

	trace_intel_pipe_crc(crtc, crcs);

	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	drm_crtc_add_crc_entry(&crtc->base, true,
				drm_crtc_accurate_vblank_count(&crtc->base),
				crcs);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
#endif

static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 res1, res2;

	if (INTEL_GEN(dev_priv) >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(PIPESTAT(pipe),
			   PIPESTAT_INT_STATUS_MASK |
			   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}

static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		default:
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			status_mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(pipe);
		pipe_stats[pipe] = I915_READ(reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
1368
		 */
1369 1370 1371 1372
		if (pipe_stats[pipe]) {
			I915_WRITE(reg, pipe_stats[pipe]);
			I915_WRITE(reg, enable_mask);
		}
1373
	}
1374
	spin_unlock(&dev_priv->irq_lock);
1375 1376
}

1377 1378 1379 1380 1381 1382 1383
static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}
}

static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);
}

static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = 0, hotplug_status_mask;
	int i;

	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
	else
		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;

	/*
	 * We absolutely have to clear all the pending interrupt
	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
	 * interrupt bit won't have an edge, and the i965/g4x
	 * edge triggered IIR will not notice that an interrupt
	 * is still pending. We can't use PORT_HOTPLUG_EN to
	 * guarantee the edge as the act of toggling the enable
	 * bits can itself generate a new hotplug interrupt :(
	 */
	for (i = 0; i < 10; i++) {
		u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;

		if (tmp == 0)
			return hotplug_status;

		hotplug_status |= tmp;
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
	}

	drm_WARN_ONCE(&dev_priv->drm, 1,
		      "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
		      I915_READ(PORT_HOTPLUG_STAT));

	return hotplug_status;
}

static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (hotplug_trigger) {
			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
					   hotplug_trigger, hotplug_trigger,
					   hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev_priv);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (hotplug_trigger) {
			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
					   hotplug_trigger, hotplug_trigger,
					   hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);
		iir = I915_READ(VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		I915_WRITE(VLV_MASTER_IER, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		I915_WRITE(VLV_IER, ier);
		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

		if (gt_iir)
			gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 master_ctl, iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		I915_WRITE(GEN8_MASTER_IRQ, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		gen8_gt_irq_handler(&dev_priv->gt, master_ctl);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

1684
		I915_WRITE(VLV_IER, ier);
1685
		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1686 1687

		if (hotplug_status)
1688
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1689

1690
		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1691
	} while (0);
1692

1693
	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1694

1695 1696 1697
	return ret;
}

static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/*
	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
	 * unless we touch the hotplug register, even if hotplug_trigger is
	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
	 * errors.
	 */
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	if (!hotplug_trigger) {
		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
			PORTD_HOTPLUG_STATUS_MASK |
			PORTC_HOTPLUG_STATUS_MASK |
			PORTB_HOTPLUG_STATUS_MASK;
		dig_hotplug_reg &= ~mask;
	}

	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
	if (!hotplug_trigger)
		return;

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   pch_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n",
			port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		drm_err(&dev_priv->drm, "PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK) {
		for_each_pipe(dev_priv, pipe)
			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
				pipe_name(pipe),
				I915_READ(FDI_RX_IIR(pipe)));
	}

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		drm_dbg(&dev_priv->drm,
			"PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
}

static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		drm_err(&dev_priv->drm, "Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev_priv))
				ivb_pipe_crc_irq_handler(dev_priv, pipe);
			else
				hsw_pipe_crc_irq_handler(dev_priv, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
	u32 serr_int = I915_READ(SERR_INT);
	enum pipe pipe;

	if (serr_int & SERR_INT_POISON)
		drm_err(&dev_priv->drm, "PCH poison interrupt\n");

	for_each_pipe(dev_priv, pipe)
		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
			port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT) {
		for_each_pipe(dev_priv, pipe)
			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
				pipe_name(pipe),
				I915_READ(FDI_RX_IIR(pipe)));
	}

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev_priv);
}

static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 ddi_hotplug_trigger, tc_hotplug_trigger;
	u32 pin_mask = 0, long_mask = 0;
	bool (*tc_port_hotplug_long_detect)(enum hpd_pin pin, u32 val);
	const u32 *pins;

	if (HAS_PCH_TGP(dev_priv)) {
		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
		tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP;
		tc_port_hotplug_long_detect = tgp_tc_port_hotplug_long_detect;
		pins = hpd_tgp;
	} else if (HAS_PCH_JSP(dev_priv)) {
		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
		tc_hotplug_trigger = 0;
		pins = hpd_tgp;
	} else if (HAS_PCH_MCC(dev_priv)) {
		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
		tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_ICP(PORT_TC1);
		tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
		pins = hpd_icp;
	} else {
		drm_WARN(&dev_priv->drm, !HAS_PCH_ICP(dev_priv),
			 "Unrecognized PCH type 0x%x\n",
			 INTEL_PCH_TYPE(dev_priv));

		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
		tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
		tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
		pins = hpd_icp;
	}

	if (ddi_hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
		I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   ddi_hotplug_trigger,
				   dig_hotplug_reg, pins,
				   icp_ddi_port_hotplug_long_detect);
	}

	if (tc_hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
		I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   tc_hotplug_trigger,
				   dig_hotplug_reg, pins,
				   tc_port_hotplug_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_ICP)
		gmbus_irq_handler(dev_priv);
}
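
/*
 * Editorial note: the JSP branch above leaves tc_port_hotplug_long_detect
 * unassigned. That is only safe because it also forces tc_hotplug_trigger
 * to 0, so the TC path that would call through the pointer is never taken.
 */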

static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
	u32 pin_mask = 0, long_mask = 0;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug_trigger, dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug_long_detect);
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug2_trigger, dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug2_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);
}

static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   ilk_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev_priv);

	if (de_iir & DE_POISON)
		drm_err(&dev_priv->drm, "Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			intel_handle_vblank(dev_priv, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev_priv))
			cpt_irq_handler(dev_priv, pch_iir);
		else
			ibx_irq_handler(dev_priv, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
		gen5_rps_irq_handler(&dev_priv->gt.rps);
}

static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev_priv);

	if (de_iir & DE_EDP_PSR_INT_HSW) {
		u32 psr_iir = I915_READ(EDP_PSR_IIR);

		intel_psr_irq_handler(dev_priv, psr_iir);
		I915_WRITE(EDP_PSR_IIR, psr_iir);
	}

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev_priv);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
			intel_handle_vblank(dev_priv, pipe);
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev_priv, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev_priv)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
		if (INTEL_GEN(dev_priv) >= 6)
			gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
		else
			gen5_gt_irq_handler(&dev_priv->gt, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
		if (INTEL_GEN(dev_priv) >= 7)
			ivb_display_irq_handler(dev_priv, de_iir);
		else
			ilk_display_irq_handler(dev_priv, de_iir);
	}

	if (INTEL_GEN(dev_priv) >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
		}
	}

	I915_WRITE(DEIER, de_ier);
	if (!HAS_PCH_NOP(dev_priv))
		I915_WRITE(SDEIER, sde_ier);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   bxt_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
	u32 pin_mask = 0, long_mask = 0;
	u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
	u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
	long_pulse_detect_func long_pulse_detect;
	const u32 *hpd;

	if (INTEL_GEN(dev_priv) >= 12) {
		long_pulse_detect = gen12_port_hotplug_long_detect;
		hpd = hpd_gen12;
	} else {
		long_pulse_detect = gen11_port_hotplug_long_detect;
		hpd = hpd_gen11;
	}

	if (trigger_tc) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
		I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
				   dig_hotplug_reg, hpd, long_pulse_detect);
	}

	if (trigger_tbt) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
		I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
				   dig_hotplug_reg, hpd, long_pulse_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
	else
		drm_err(&dev_priv->drm,
			"Unexpected DE HPD interrupt 0x%08x\n", iir);
}

static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
{
	u32 mask;

	if (INTEL_GEN(dev_priv) >= 12)
		return TGL_DE_PORT_AUX_DDIA |
			TGL_DE_PORT_AUX_DDIB |
			TGL_DE_PORT_AUX_DDIC |
			TGL_DE_PORT_AUX_USBC1 |
			TGL_DE_PORT_AUX_USBC2 |
			TGL_DE_PORT_AUX_USBC3 |
			TGL_DE_PORT_AUX_USBC4 |
			TGL_DE_PORT_AUX_USBC5 |
			TGL_DE_PORT_AUX_USBC6;

	mask = GEN8_AUX_CHANNEL_A;
	if (INTEL_GEN(dev_priv) >= 9)
		mask |= GEN9_AUX_CHANNEL_B |
			GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;

	if (IS_CNL_WITH_PORT_F(dev_priv) || IS_GEN(dev_priv, 11))
		mask |= CNL_AUX_CHANNEL_F;

	if (IS_GEN(dev_priv, 11))
		mask |= ICL_AUX_CHANNEL_E;

	return mask;
}

static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 11)
		return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
	else if (INTEL_GEN(dev_priv) >= 9)
		return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
	else
		return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
}

static void
gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
	bool found = false;

	if (iir & GEN8_DE_MISC_GSE) {
		intel_opregion_asle_intr(dev_priv);
		found = true;
	}

	if (iir & GEN8_DE_EDP_PSR) {
		u32 psr_iir;
		i915_reg_t iir_reg;

		if (INTEL_GEN(dev_priv) >= 12)
			iir_reg = TRANS_PSR_IIR(dev_priv->psr.transcoder);
		else
			iir_reg = EDP_PSR_IIR;

		psr_iir = I915_READ(iir_reg);
		I915_WRITE(iir_reg, psr_iir);

		if (psr_iir)
			found = true;

		intel_psr_irq_handler(dev_priv, psr_iir);
	}

	if (!found)
		drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
}

static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;
	u32 iir;
	enum pipe pipe;

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = I915_READ(GEN8_DE_MISC_IIR);
		if (iir) {
			I915_WRITE(GEN8_DE_MISC_IIR, iir);
			ret = IRQ_HANDLED;
			gen8_de_misc_irq_handler(dev_priv, iir);
		} else {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE MISC)!\n");
		}
	}

	if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
		iir = I915_READ(GEN11_DE_HPD_IIR);
		if (iir) {
			I915_WRITE(GEN11_DE_HPD_IIR, iir);
			ret = IRQ_HANDLED;
			gen11_hpd_irq_handler(dev_priv, iir);
		} else {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied, (DE HPD)!\n");
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = I915_READ(GEN8_DE_PORT_IIR);
		if (iir) {
			u32 tmp_mask;
			bool found = false;

			I915_WRITE(GEN8_DE_PORT_IIR, iir);
			ret = IRQ_HANDLED;

			if (iir & gen8_de_port_aux_mask(dev_priv)) {
				dp_aux_irq_handler(dev_priv);
				found = true;
			}

			if (IS_GEN9_LP(dev_priv)) {
				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
				if (tmp_mask) {
					bxt_hpd_irq_handler(dev_priv, tmp_mask,
							    hpd_bxt);
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
				if (tmp_mask) {
					ilk_hpd_irq_handler(dev_priv,
							    tmp_mask, hpd_bdw);
					found = true;
				}
			}

			if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
				gmbus_irq_handler(dev_priv);
				found = true;
			}

			if (!found)
				drm_err(&dev_priv->drm,
					"Unexpected DE Port interrupt\n");
		} else {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE PORT)!\n");
		}
	}

	for_each_pipe(dev_priv, pipe) {
		u32 fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE PIPE)!\n");
			continue;
		}

		ret = IRQ_HANDLED;
		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK)
			intel_handle_vblank(dev_priv, pipe);

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev_priv, pipe);

		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
		if (fault_errors)
			drm_err(&dev_priv->drm,
				"Fault errors on pipe %c: 0x%08x\n",
				pipe_name(pipe),
				fault_errors);
	}

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		iir = I915_READ(SDEIIR);
		if (iir) {
			I915_WRITE(SDEIIR, iir);
			ret = IRQ_HANDLED;

			if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
				icp_irq_handler(dev_priv, iir);
			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
				spt_irq_handler(dev_priv, iir);
			else
				cpt_irq_handler(dev_priv, iir);
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			drm_dbg(&dev_priv->drm,
				"The master control interrupt lied (SDE)!\n");
		}
	}

	return ret;
}

static inline u32 gen8_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN8_MASTER_IRQ);
}

static inline void gen8_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
}
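
/*
 * Typical use of the pair above, as done by gen8_irq_handler() below
 * (sketch):
 *
 *	master_ctl = gen8_master_intr_disable(regs);
 *	if (!master_ctl) {
 *		gen8_master_intr_enable(regs);
 *		return IRQ_NONE;
 *	}
 *	... ack and handle the individual sources ...
 *	gen8_master_intr_enable(regs);
 */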

static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	void __iomem * const regs = dev_priv->uncore.regs;
	u32 master_ctl;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	master_ctl = gen8_master_intr_disable(regs);
	if (!master_ctl) {
		gen8_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen8_gt_irq_handler(&dev_priv->gt, master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & ~GEN8_GT_IRQS) {
		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
		gen8_de_irq_handler(dev_priv, master_ctl);
		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
	}

	gen8_master_intr_enable(regs);

	return IRQ_HANDLED;
}

static u32
gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 iir;

	if (!(master_ctl & GEN11_GU_MISC_IRQ))
		return 0;

	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
	if (likely(iir))
		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);

	return iir;
}

static void
gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
{
	if (iir & GEN11_GU_MISC_GSE)
		intel_opregion_asle_intr(gt->i915);
}

static inline u32 gen11_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
}

static inline void gen11_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}
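
/*
 * These two helpers mirror the gen8 versions above; gen11_irq_handler()
 * below hands them to __gen11_irq_handler().
 */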

static void
gen11_display_irq_handler(struct drm_i915_private *i915)
{
	void __iomem * const regs = i915->uncore.regs;
	const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);

	disable_rpm_wakeref_asserts(&i915->runtime_pm);
	/*
	 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
	 * for the display related bits.
	 */
	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
	gen8_de_irq_handler(i915, disp_ctl);
	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
		      GEN11_DISPLAY_IRQ_ENABLE);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);
}
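
/*
 * Editorial note: the top-level gen11 flow below is written against
 * intr_disable/intr_enable callbacks rather than hard-coding the gen11
 * master IRQ helpers, presumably so the same find/queue/clear sequence
 * can be reused with a different master control register.
 */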

static __always_inline irqreturn_t
__gen11_irq_handler(struct drm_i915_private * const i915,
		    u32 (*intr_disable)(void __iomem * const regs),
		    void (*intr_enable)(void __iomem * const regs))
{
	void __iomem * const regs = i915->uncore.regs;
	struct intel_gt *gt = &i915->gt;
	u32 master_ctl;
	u32 gu_misc_iir;

	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_ctl = intr_disable(regs);
	if (!master_ctl) {
		intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen11_gt_irq_handler(gt, master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & GEN11_DISPLAY_IRQ)
		gen11_display_irq_handler(i915);

	gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);

	intr_enable(regs);

	gen11_gu_misc_irq_handler(gt, gu_misc_iir);

	return IRQ_HANDLED;
}

static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
	return __gen11_irq_handler(arg,
				   gen11_master_intr_disable,
				   gen11_master_intr_enable);
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
int i8xx_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

int i915gm_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);

	/*
	 * Vblank interrupts fail to wake the device up from C2+.
	 * Disabling render clock gating during C-states avoids
	 * the problem. There is a small power cost so we do this
	 * only when vblank interrupts are actually enabled.
	 */
	if (dev_priv->vblank_enabled++ == 0)
		I915_WRITE(SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));

	return i8xx_enable_vblank(crtc);
}

int i965_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

int ilk_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* Even though there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated.
	 */
	if (HAS_PSR(dev_priv))
		drm_crtc_vblank_restore(crtc);

	return 0;
}

int bdw_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* Even if there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated, so check only for PSR.
	 */
	if (HAS_PSR(dev_priv))
		drm_crtc_vblank_restore(crtc);

	return 0;
}
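
/*
 * A minimal sketch of how the vblank hooks above and below are consumed.
 * The actual wiring lives elsewhere in the driver; this only assumes the
 * standard drm_crtc_funcs interface, and the structure name here is
 * illustrative, not from this file:
 *
 *	static const struct drm_crtc_funcs example_crtc_funcs = {
 *		...
 *		.enable_vblank = bdw_enable_vblank,
 *		.disable_vblank = bdw_disable_vblank,
 *	};
 */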

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
void i8xx_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

void i915gm_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);

	i8xx_disable_vblank(crtc);

	if (--dev_priv->vblank_enabled == 0)
		I915_WRITE(SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}

void i965_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

void ilk_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

void bdw_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (HAS_PCH_NOP(dev_priv))
		return;

	GEN3_IRQ_RESET(uncore, SDE);

	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		I915_WRITE(SERR_INT, 0xffffffff);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * This function needs to be called before interrupts are enabled.
 */
static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_NOP(dev_priv))
		return;

	drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}

static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (IS_CHERRYVIEW(dev_priv))
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);

	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
	intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, VLV_);
	dev_priv->irq_mask = ~0u;
}

static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 pipestat_mask;
	u32 enable_mask;
	enum pipe pipe;

	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_LPE_PIPE_A_INTERRUPT |
		I915_LPE_PIPE_B_INTERRUPT;

	if (IS_CHERRYVIEW(dev_priv))
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
			I915_LPE_PIPE_C_INTERRUPT;

	drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);

	dev_priv->irq_mask = ~enable_mask;

	GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
}
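
/*
 * Editorial note: the irq_reset/irq_postinstall pairs below implement the
 * driver's IRQ setup hooks: reset masks and clears everything, while
 * postinstall unmasks only the sources a given platform actually uses.
 */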

/* drm_dma.h hooks
 */
static void ilk_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	GEN3_IRQ_RESET(uncore, DE);
	if (IS_GEN(dev_priv, 7))
		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);

	if (IS_HASWELL(dev_priv)) {
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}

	gen5_gt_irq_reset(&dev_priv->gt);

	ibx_irq_reset(dev_priv);
}

static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
	I915_WRITE(VLV_MASTER_IER, 0);
	POSTING_READ(VLV_MASTER_IER);

	gen5_gt_irq_reset(&dev_priv->gt);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;

	gen8_master_intr_disable(dev_priv->uncore.regs);

	gen8_gt_irq_reset(&dev_priv->gt);

	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_reset(dev_priv);
}

static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;

	intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);

	if (INTEL_GEN(dev_priv) >= 12) {
		enum transcoder trans;

		for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
			intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
		}
	} else {
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
	GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		GEN3_IRQ_RESET(uncore, SDE);
}

static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	gen11_master_intr_disable(dev_priv->uncore.regs);

	gen11_gt_irq_reset(&dev_priv->gt);
	gen11_display_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
}

void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
				  dev_priv->de_irq_mask[pipe],
				  ~dev_priv->de_irq_mask[pipe] | extra_ier);

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);
}

static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(&dev_priv->gt);

	GEN3_IRQ_RESET(uncore, GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}
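
/*
 * Editorial note: intel_hpd_enabled_irqs() above builds a platform trigger
 * mask by or-ing each enabled encoder's pin bit from the given hpd[] table.
 */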

static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on LPT+.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTB_PULSE_DURATION_MASK |
		     PORTC_PULSE_DURATION_MASK |
		     PORTD_PULSE_DURATION_MASK);
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	/*
	 * When CPU and PCH are on the same package, port A
	 * HPD must be enabled in both north and south.
	 */
	if (HAS_PCH_LPT_LP(dev_priv))
		hotplug |= PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	if (HAS_PCH_IBX(dev_priv)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	ibx_hpd_detection_setup(dev_priv);
}

static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv,
				    u32 ddi_hotplug_enable_mask,
				    u32 tc_hotplug_enable_mask)
{
	u32 hotplug;

	hotplug = I915_READ(SHOTPLUG_CTL_DDI);
	hotplug |= ddi_hotplug_enable_mask;
	I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);

	if (tc_hotplug_enable_mask) {
		hotplug = I915_READ(SHOTPLUG_CTL_TC);
		hotplug |= tc_hotplug_enable_mask;
		I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
	}
}

static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv,
			      u32 sde_ddi_mask, u32 sde_tc_mask,
			      u32 ddi_enable_mask, u32 tc_enable_mask,
			      const u32 *pins)
{
	u32 hotplug_irqs, enabled_irqs;

	hotplug_irqs = sde_ddi_mask | sde_tc_mask;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, pins);

	I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	icp_hpd_detection_setup(dev_priv, ddi_enable_mask, tc_enable_mask);
}

/*
 * EHL doesn't need most of gen11_hpd_irq_setup, it's handling only the
 * equivalent of SDE.
 */
static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	icp_hpd_irq_setup(dev_priv,
			  SDE_DDI_MASK_ICP, SDE_TC_HOTPLUG_ICP(PORT_TC1),
			  ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE(PORT_TC1),
			  hpd_icp);
}

/*
 * JSP behaves exactly the same as MCC above except that port C is mapped to
 * the DDI-C pins instead of the TC1 pins.  This means we should follow TGP's
 * masks & tables rather than ICP's masks & tables.
 */
static void jsp_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	icp_hpd_irq_setup(dev_priv,
			  SDE_DDI_MASK_TGP, 0,
			  TGP_DDI_HPD_ENABLE_MASK, 0,
			  hpd_tgp);
}

static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
	I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);

	hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
	I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
}

static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;
	const u32 *hpd;
	u32 val;

	hpd = INTEL_GEN(dev_priv) >= 12 ? hpd_gen12 : hpd_gen11;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd);
	hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;

	val = I915_READ(GEN11_DE_HPD_IMR);
	val &= ~hotplug_irqs;
	I915_WRITE(GEN11_DE_HPD_IMR, val);
	POSTING_READ(GEN11_DE_HPD_IMR);

	gen11_hpd_detection_setup(dev_priv);

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
		icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_TGP, SDE_TC_MASK_TGP,
				  TGP_DDI_HPD_ENABLE_MASK,
				  TGP_TC_HPD_ENABLE_MASK, hpd_tgp);
	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_ICP, SDE_TC_MASK_ICP,
				  ICP_DDI_HPD_ENABLE_MASK,
				  ICP_TC_HPD_ENABLE_MASK, hpd_icp);
}
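
/*
 * Editorial note on the split in gen11_hpd_irq_setup() above: the
 * GEN11_DE_HPD writes configure hotplug detection in the north display
 * engine, while the icp_hpd_irq_setup() calls configure the matching
 * south (PCH) logic.
 */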

static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 val, hotplug;

	/* Display WA #1179 WaHardHangonHotPlug: cnp */
	if (HAS_PCH_CNP(dev_priv)) {
		val = I915_READ(SOUTH_CHICKEN1);
		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
		val |= CHASSIS_CLK_REQ_DURATION(0xf);
		I915_WRITE(SOUTH_CHICKEN1, val);
	}

	/* Enable digital hotplug on the PCH */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE |
		   PORTD_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);

	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
	hotplug |= PORTE_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}

static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);

	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	spt_hpd_detection_setup(dev_priv);
}

static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 * The pulse duration bits are reserved on HSW+.
	 */
	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
		   DIGITAL_PORTA_PULSE_DURATION_2ms;
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
}

static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	if (INTEL_GEN(dev_priv) >= 8) {
		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);

		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else if (INTEL_GEN(dev_priv) >= 7) {
		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else {
		hotplug_irqs = DE_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	}

	ilk_hpd_detection_setup(dev_priv);

	ibx_hpd_irq_setup(dev_priv);
}

static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
				      u32 enabled_irqs)
{
	u32 hotplug;

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE;

	drm_dbg_kms(&dev_priv->drm,
		    "Invert bit setting: hp_ctl:%x hp_port:%x\n",
		    hotplug, enabled_irqs);
	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;

	/*
	 * For BXT invert bit has to be set based on AOB design
	 * for HPD detection logic, update it based on VBT fields.
	 */
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
		hotplug |= BXT_DDIA_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
		hotplug |= BXT_DDIB_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
		hotplug |= BXT_DDIC_HPD_INVERT;

	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	__bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
}

static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;

	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);

	__bxt_hpd_detection_setup(dev_priv, enabled_irqs);
}

static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	u32 mask;

	if (HAS_PCH_NOP(dev_priv))
		return;

	if (HAS_PCH_IBX(dev_priv))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
	else
		mask = SDE_GMBUS_CPT;

	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);

	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
	    HAS_PCH_LPT(dev_priv))
		ibx_hpd_detection_setup(dev_priv);
	else
		spt_hpd_detection_setup(dev_priv);
}

static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 display_mask, extra_mask;

	if (INTEL_GEN(dev_priv) >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_DP_A_HOTPLUG);
	}

	if (IS_HASWELL(dev_priv)) {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
		display_mask |= DE_EDP_PSR_INT_HSW;
	}

	dev_priv->irq_mask = ~display_mask;

	ibx_irq_pre_postinstall(dev_priv);

	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
		      display_mask | extra_mask);

	gen5_gt_irq_postinstall(&dev_priv->gt);

	ilk_hpd_detection_setup(dev_priv);

	ibx_irq_postinstall(dev_priv);

	if (IS_IRONLAKE_M(dev_priv)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irq(&dev_priv->irq_lock);
		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}
}

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv)) {
		vlv_display_irq_reset(dev_priv);
		vlv_display_irq_postinstall(dev_priv);
	}
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		vlv_display_irq_reset(dev_priv);
}

3343

3344
static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
3345
{
3346
	gen5_gt_irq_postinstall(&dev_priv->gt);
J
Jesse Barnes 已提交
3347

3348
	spin_lock_irq(&dev_priv->irq_lock);
3349 3350
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
3351 3352
	spin_unlock_irq(&dev_priv->irq_lock);

J
Jesse Barnes 已提交
3353
	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3354
	POSTING_READ(VLV_MASTER_IER);
3355 3356
}

static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
		GEN8_PIPE_CDCLK_CRC_DONE;
	u32 de_pipe_enables;
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
	enum pipe pipe;

	if (INTEL_GEN(dev_priv) <= 10)
		de_misc_masked |= GEN8_DE_MISC_GSE;

	if (INTEL_GEN(dev_priv) >= 9) {
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
				  GEN9_AUX_CHANNEL_D;
		if (IS_GEN9_LP(dev_priv))
			de_port_masked |= BXT_DE_PORT_GMBUS;
	}

	if (INTEL_GEN(dev_priv) >= 11)
		de_port_masked |= ICL_AUX_CHANNEL_E;

	if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
		de_port_masked |= CNL_AUX_CHANNEL_F;

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

	de_port_enables = de_port_masked;
	if (IS_GEN9_LP(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;

	if (INTEL_GEN(dev_priv) >= 12) {
		enum transcoder trans;

		for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
		}
	} else {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
	}

	for_each_pipe(dev_priv, pipe) {
		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;

		if (intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);
	}

	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);

	if (INTEL_GEN(dev_priv) >= 11) {
		u32 de_hpd_masked = 0;
		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
				     GEN11_DE_TBT_HOTPLUG_MASK;

		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
			      de_hpd_enables);
		gen11_hpd_detection_setup(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		bxt_hpd_detection_setup(dev_priv);
	} else if (IS_BROADWELL(dev_priv)) {
		ilk_hpd_detection_setup(dev_priv);
	}
}

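/*
 * Gen8/9 top level: the PCH (south) interrupts are prepared first, then
 * the GT and DE blocks, and the master interrupt is enabled only at the
 * very end.
 */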
static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_pre_postinstall(dev_priv);

	gen8_gt_irq_postinstall(&dev_priv->gt);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev_priv);

	gen8_master_intr_enable(dev_priv->uncore.regs);
}

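/*
 * ICP+ south display (PCH) interrupts: enable everything in SDEIER,
 * unmask only GMBUS in SDEIMR, then program the DDI/TC hotplug detection
 * logic with the enable mask matching the detected PCH variant.
 */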
static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
	u32 mask = SDE_GMBUS_ICP;

	drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);

	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);

	if (HAS_PCH_TGP(dev_priv))
		icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK,
					TGP_TC_HPD_ENABLE_MASK);
	else if (HAS_PCH_JSP(dev_priv))
		icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0);
	else if (HAS_PCH_MCC(dev_priv))
		icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
					ICP_TC_HPD_ENABLE(PORT_TC1));
	else
		icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
					ICP_TC_HPD_ENABLE_MASK);
}

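/*
 * Gen11 adds another level to the interrupt hierarchy: besides the
 * master enable there is a separate display interrupt enable
 * (GEN11_DISPLAY_INT_CTL), and the GU misc block carries the GSE event.
 */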
static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);

	gen11_gt_irq_postinstall(&dev_priv->gt);
	gen8_de_irq_postinstall(dev_priv);

	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);

	gen11_master_intr_enable(uncore->regs);
	POSTING_READ(GEN11_GFX_MSTR_IRQ);
}

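/*
 * CHV combines a gen8-style GT interrupt block with the VLV-style
 * display block, which is why the display half is guarded by
 * display_irqs_enabled here just like on VLV.
 */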
static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen8_gt_irq_postinstall(&dev_priv->gt);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);
}

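/*
 * Gen2 has 16-bit wide interrupt registers, hence the intel_uncore_*16()
 * accessors and u16 masks used by the i8xx variants below.
 */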
static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i9xx_pipestat_irq_reset(dev_priv);

	GEN2_IRQ_RESET(uncore);
}

static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u16 enable_mask;

	intel_uncore_write16(uncore,
			     EMR,
			     ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded; this
	 * is just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);
}

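/*
 * Read and clear EIR. Any bits that cannot be cleared (because the
 * underlying error condition is still present) are returned in
 * *eir_stuck and masked off in EMR so they cannot wedge the
 * edge-triggered master error interrupt; see the comment in the
 * function body for the details.
 */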
static void i8xx_error_irq_ack(struct drm_i915_private *i915,
			       u16 *eir, u16 *eir_stuck)
{
	struct intel_uncore *uncore = &i915->uncore;
	u16 emr;

	*eir = intel_uncore_read16(uncore, EIR);

	if (*eir)
		intel_uncore_write16(uncore, EIR, *eir);

	*eir_stuck = intel_uncore_read16(uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_read16(uncore, EMR);
	intel_uncore_write16(uncore, EMR, 0xffff);
	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
}

static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u16 eir, u16 eir_stuck)
{
	DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
			eir_stuck);
}

static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	*eir = I915_READ(EIR);

	I915_WRITE(EIR, *eir);

	*eir_stuck = I915_READ(EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = I915_READ(EMR);
	I915_WRITE(EMR, 0xffffffff);
	I915_WRITE(EMR, emr | *eir_stuck);
}

static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u32 eir, u32 eir_stuck)
{
	DRM_DEBUG("Master Error: EIR 0x%08x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
			eir_stuck);
}

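/*
 * Main interrupt handler for gen2. The structure is ack first, handle
 * later: all status registers (pipestat, EIR) are read and cleared
 * before IIR itself is written back, and only then are the individual
 * events processed. The same pattern repeats in the i915/i965 handlers.
 */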
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u16 eir = 0, eir_stuck = 0;
		u16 iir;

		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);

		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
}

static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
			  I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded; this
	 * is just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		I915_WRITE(GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
}

static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;
	u32 error_mask;

	/*
	 * Enable some error detection; note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded; this
	 * is just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}

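/*
 * Hotplug detection setup for gen3/4. HDMI, DP and CRT share the
 * HOTPLUG_EN bits; TV hotplug is deliberately left untouched. Must be
 * called with dev_priv->irq_lock held, as the lockdep assert documents.
 */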
static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/*
	 * Programming the CRT detection parameters tends to generate a
	 * spurious hotplug event about three seconds later. So just do
	 * it once.
	 */
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		I915_WRITE(GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);

		if (iir & I915_BSD_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->engine[VCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself, though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int i;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	/* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
	if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11)
		dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;

	dev->vblank_disable_immediate = true;

	/*
	 * Most platforms treat the display irq block as an always-on
	 * power domain. vlv/chv can disable it at runtime and need
	 * special care to avoid writing any of the display block registers
	 * outside of the power domain. We defer setting up the display irqs
	 * in this case to the runtime pm.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	/*
	 * If we have MST support, we want to avoid doing short HPD IRQ
	 * storm detection, as short HPD storms will occur as a natural
	 * part of sideband messaging with MST.
	 * On older platforms however, IRQ storms can occur with both long
	 * and short pulses, as seen on some G4x systems.
	 */
	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);

	if (HAS_GMCH(dev_priv)) {
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else {
		if (HAS_PCH_JSP(dev_priv))
			dev_priv->display.hpd_irq_setup = jsp_hpd_irq_setup;
		else if (HAS_PCH_MCC(dev_priv))
			dev_priv->display.hpd_irq_setup = mcc_hpd_irq_setup;
		else if (INTEL_GEN(dev_priv) >= 11)
			dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
		else if (IS_GEN9_LP(dev_priv))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	}
}

/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}

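/*
 * Pick the top-level interrupt handler for the platform: GMCH-based
 * platforms (VLV/CHV and gen2-4) each have their own handler, everything
 * else funnels through the ilk/gen8/gen11 handlers. The reset and
 * postinstall dispatchers below mirror the same structure.
 */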
static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			return cherryview_irq_handler;
		else if (IS_VALLEYVIEW(dev_priv))
			return valleyview_irq_handler;
		else if (IS_GEN(dev_priv, 4))
			return i965_irq_handler;
		else if (IS_GEN(dev_priv, 3))
			return i915_irq_handler;
		else
			return i8xx_irq_handler;
	} else {
		if (INTEL_GEN(dev_priv) >= 11)
			return gen11_irq_handler;
		else if (INTEL_GEN(dev_priv) >= 8)
			return gen8_irq_handler;
		else
			return ilk_irq_handler;
	}
}

static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_reset(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_reset(dev_priv);
		else if (IS_GEN(dev_priv, 4))
			i965_irq_reset(dev_priv);
		else if (IS_GEN(dev_priv, 3))
			i915_irq_reset(dev_priv);
		else
			i8xx_irq_reset(dev_priv);
	} else {
		if (INTEL_GEN(dev_priv) >= 11)
			gen11_irq_reset(dev_priv);
		else if (INTEL_GEN(dev_priv) >= 8)
			gen8_irq_reset(dev_priv);
		else
			ilk_irq_reset(dev_priv);
	}
}

static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_postinstall(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_postinstall(dev_priv);
		else if (IS_GEN(dev_priv, 4))
			i965_irq_postinstall(dev_priv);
		else if (IS_GEN(dev_priv, 3))
			i915_irq_postinstall(dev_priv);
		else
			i8xx_irq_postinstall(dev_priv);
	} else {
		if (INTEL_GEN(dev_priv) >= 11)
			gen11_irq_postinstall(dev_priv);
		else if (INTEL_GEN(dev_priv) >= 8)
			gen8_irq_postinstall(dev_priv);
		else
			ilk_irq_postinstall(dev_priv);
	}
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	int irq = dev_priv->drm.pdev->irq;
	int ret;

	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	dev_priv->drm.irq_enabled = true;

	intel_irq_reset(dev_priv);

	ret = request_irq(irq, intel_irq_handler(dev_priv),
			  IRQF_SHARED, DRIVER_NAME, dev_priv);
	if (ret < 0) {
		dev_priv->drm.irq_enabled = false;
		return ret;
	}

	intel_irq_postinstall(dev_priv);

	return ret;
}
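
/*
 * A sketch of the expected call order during driver load and unload
 * (the exact call sites live elsewhere in the driver and may differ):
 *
 *	intel_irq_init(i915);
 *	ret = intel_irq_install(i915);
 *	...
 *	intel_irq_uninstall(i915);
 *	intel_irq_fini(i915);
 */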

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	int irq = dev_priv->drm.pdev->irq;

	/*
	 * FIXME we can get called twice during driver probe
	 * error handling as well as during driver remove due to
	 * intel_modeset_driver_remove() calling us out of sequence.
	 * Would be nice if it didn't do that...
	 */
	if (!dev_priv->drm.irq_enabled)
		return;

	dev_priv->drm.irq_enabled = false;

	intel_irq_reset(dev_priv);

	free_irq(irq, dev_priv);

	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	intel_irq_reset(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
	intel_synchronize_irq(dev_priv);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->runtime_pm.irqs_enabled = true;
	intel_irq_reset(dev_priv);
	intel_irq_postinstall(dev_priv);
}

bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	/*
	 * We only use drm_irq_uninstall() at unload and VT switch, so
	 * this is the only thing we need to check.
	 */
	return dev_priv->runtime_pm.irqs_enabled;
}

void intel_synchronize_irq(struct drm_i915_private *i915)
{
	synchronize_irq(i915->drm.pdev->irq);
}