/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>
#include <drm/drm_irq.h>

#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"

#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_pm.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG,
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC,
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
	[HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
	[HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
	[HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG,
};

static const u32 hpd_gen12[HPD_NUM_PINS] = {
	[HPD_PORT_D] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
	[HPD_PORT_E] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
	[HPD_PORT_F] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
	[HPD_PORT_G] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG,
	[HPD_PORT_H] = GEN12_TC5_HOTPLUG | GEN12_TBT5_HOTPLUG,
	[HPD_PORT_I] = GEN12_TC6_HOTPLUG | GEN12_TBT6_HOTPLUG,
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
	[HPD_PORT_C] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
	[HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
	[HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
	[HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
};

static const u32 hpd_tgp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(PORT_C),
	[HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
	[HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
	[HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
	[HPD_PORT_G] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
	[HPD_PORT_H] = SDE_TC_HOTPLUG_ICP(PORT_TC5),
	[HPD_PORT_I] = SDE_TC_HOTPLUG_ICP(PORT_TC6),
};

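/*
 * Each table above maps an hpd_pin to the platform-specific trigger bit(s)
 * in the corresponding hotplug status/enable register. The tables are
 * consumed by intel_get_hpd_pins() further below, paired with a matching
 * *_long_detect() callback that classifies a triggered pin's pulse length.
 */
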
static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	drm_crtc_handle_vblank(&crtc->base);
}

void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
		    i915_reg_t iir, i915_reg_t ier)
{
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	intel_uncore_write(uncore, ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}

void gen2_irq_reset(struct intel_uncore *uncore)
{
	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IMR);

	intel_uncore_write16(uncore, GEN2_IER, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}

static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(GEN2_IIR), val);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

void gen3_irq_init(struct intel_uncore *uncore,
		   i915_reg_t imr, u32 imr_val,
		   i915_reg_t ier, u32 ier_val,
		   i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}

void gen2_irq_init(struct intel_uncore *uncore,
		   u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}

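/*
 * Illustrative pairing (a sketch, not code from this file): a platform's
 * irq_reset/irq_postinstall hooks are expected to use the helpers above in
 * pairs, first quiescing an interrupt bank (IMR fully masked, IER disabled,
 * IIR drained twice) and only later re-arming it, e.g. hypothetically for
 * the display engine bank:
 *
 *	gen3_irq_reset(uncore, DEIMR, DEIIR, DEIER);
 *	...
 *	gen3_irq_init(uncore, DEIMR, ~enable_mask, DEIER, enable_mask, DEIIR);
 *
 * gen3_irq_init() asserting that IIR is zero is what catches a reset path
 * that forgot to drain pending events.
 */
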
/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     u32 mask,
				     u32 bits)
{
	u32 val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. So that concurrent read-modify-write
 * cycles don't interfere, these bits are protected by a spinlock.
 * Since this function is usually not called from a context where
 * the lock is held already, this function acquires the lock itself.
 * A non-locking version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    u32 interrupt_mask,
			    u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

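/*
 * Worked example (illustrative values, not from this file): IMR bits are
 * mask bits, so a set bit disables delivery. To enable the 0x4 event and
 * disable the 0x8 event in one call:
 *
 *	ilk_update_display_irq(dev_priv, 0x4 | 0x8, 0x4);
 *
 * new_val &= ~interrupt_mask clears both bits, then
 * new_val |= (~enabled_irq_mask & interrupt_mask) sets only 0x8 again,
 * leaving 0x4 unmasked (enabled) and all other DEIMR bits untouched. The
 * bdw/ibx variants below follow the same convention for their registers.
 */
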
/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;
	u32 old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 u32 interrupt_mask,
			 u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  u32 interrupt_mask,
				  u32 enabled_irq_mask)
{
	u32 sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (INTEL_GEN(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	drm_WARN_ONCE(&dev_priv->drm,
		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}

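/*
 * Layout note (as implied by the shift above and the sanity check against
 * PIPESTAT_INT_ENABLE_MASK/PIPESTAT_INT_STATUS_MASK): PIPESTAT packs the
 * per-event status bits in its low 16 bits and the matching enable bits in
 * the high 16 bits, hence enable_mask = status_mask << 16 by default. The
 * VLV/CHV special cases exist because a few enable bits (sprite flip done,
 * FIFO underrun) do not sit exactly 16 bits above their status bits.
 */
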
void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle)
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!i915_has_asle(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 i915_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	unsigned long irqflags;

	/*
	 * On i965gm TV output the frame counter only works up to
	 * the point when we enable the TV encoder. After that the
	 * frame counter ceases to work and reads zero. We need a
	 * vblank wait before enabling the TV encoder and so we
	 * have to enable vblank interrupts while the frame counter
	 * is still in a working state. However the core vblank code
	 * does not like us returning non-zero frame counter values
	 * when we've told it that we don't have a working frame
	 * counter. Thus we must stop non-zero values leaking out.
	 */
	if (!vblank->max_vblank_count)
		return 0;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = intel_de_read_fw(dev_priv, low_frame);
		high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

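/*
 * Worked example (illustrative numbers, not from this file): the hardware
 * frame counter ticks at the start of active, while DRM wants a counter
 * that ticks at the start of vblank. With vbl_start converted to a pixel
 * count, a read taken during the vblank of frame 41 sees pixel >= vbl_start
 * and reports 41 + 1 = 42, which is exactly what the raw counter will read
 * once the next active period begins.
 */
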
u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/*
 * On certain encoders on certain platforms, pipe
 * scanline register will not work to get the scanline,
 * since the timings are driven from the PORT or issues
 * with scanline register updates.
 * This function will use Framestamp and current
 * timestamp registers to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);

		scan_post_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
					clock), 1000 * htotal);
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}

/*
 * intel_de_read_fw(), only for fast reads of display block, no need for
 * forcewake etc.
 */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return -1;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN(dev_priv, 2))
		position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank.  So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem.  We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index,
			      bool in_vblank_irq, int *vpos, int *hpos,
			      ktime_t *stime, ktime_t *etime,
			      const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(drm_crtc_from_index(dev, index));
	enum pipe pipe = crtc->pipe;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;
	bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
		IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
		mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;

	if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
		drm_dbg(&dev_priv->drm,
			"trying to get scanoutpos for disabled "
			"pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (use_scanline_counter) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (use_scanline_counter) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

/**
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is likely to go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	struct intel_gt *gt = &dev_priv->gt;
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (drm_WARN_ON_ONCE(&dev_priv->drm,
				     slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
	spin_lock_irq(&gt->irq_lock);
	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&gt->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_C:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
	case HPD_PORT_D:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
	case HPD_PORT_E:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
	case HPD_PORT_F:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
	default:
		return false;
	}
}

static bool gen12_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_D:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
	case HPD_PORT_E:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
	case HPD_PORT_F:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
	case HPD_PORT_G:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
	case HPD_PORT_H:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC5);
	case HPD_PORT_I:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC6);
	default:
		return false;
	}
}

static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_A);
	case HPD_PORT_B:
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_B);
	case HPD_PORT_C:
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_C);
	default:
		return false;
	}
}

static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_C:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
	case HPD_PORT_D:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
	case HPD_PORT_E:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
	case HPD_PORT_F:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
	default:
		return false;
	}
}

static bool tgp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_D:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
	case HPD_PORT_E:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
	case HPD_PORT_F:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
	case HPD_PORT_G:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
	case HPD_PORT_H:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC5);
	case HPD_PORT_I:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC6);
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
			       u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
{
	enum hpd_pin pin;

	BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);

	for_each_hpd_pin(pin) {
		if ((hpd[pin] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(pin);

		if (long_pulse_detect(pin, dig_hotplug_reg))
			*long_mask |= BIT(pin);
	}

	drm_dbg(&dev_priv->drm,
		"hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
		hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}

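/*
 * Usage sketch (hypothetical register/trigger names, pattern taken from the
 * callers below): zero the masks once, optionally accumulate over several
 * trigger registers, then dispatch:
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
 *			   ddi_trigger, ddi_reg, hpd_icp,
 *			   icp_ddi_port_hotplug_long_detect);
 *	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
 *			   tc_trigger, tc_reg, hpd_icp,
 *			   icp_tc_port_hotplug_long_detect);
 *	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 */
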
static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

	trace_intel_pipe_crc(crtc, crcs);

	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	drm_crtc_add_crc_entry(&crtc->base, true,
				drm_crtc_accurate_vblank_count(&crtc->base),
				crcs);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
#endif

static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 res1, res2;

	if (INTEL_GEN(dev_priv) >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(PIPESTAT(pipe),
			   PIPESTAT_INT_STATUS_MASK |
			   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}

static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		default:
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			status_mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(pipe);
		pipe_stats[pipe] = I915_READ(reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
		 */
		if (pipe_stats[pipe]) {
			I915_WRITE(reg, pipe_stats[pipe]);
			I915_WRITE(reg, enable_mask);
		}
	}
	spin_unlock(&dev_priv->irq_lock);
}

static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}
}

static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);
}

static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = 0, hotplug_status_mask;
	int i;

	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
	else
		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;

	/*
	 * We absolutely have to clear all the pending interrupt
	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
	 * interrupt bit won't have an edge, and the i965/g4x
	 * edge triggered IIR will not notice that an interrupt
	 * is still pending. We can't use PORT_HOTPLUG_EN to
	 * guarantee the edge as the act of toggling the enable
	 * bits can itself generate a new hotplug interrupt :(
	 */
	for (i = 0; i < 10; i++) {
		u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;

		if (tmp == 0)
			return hotplug_status;

		hotplug_status |= tmp;
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
	}

	drm_WARN_ONCE(&dev_priv->drm, 1,
		      "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
		      I915_READ(PORT_HOTPLUG_STAT));

	return hotplug_status;
}

static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (hotplug_trigger) {
			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
					   hotplug_trigger, hotplug_trigger,
					   hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev_priv);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (hotplug_trigger) {
			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
					   hotplug_trigger, hotplug_trigger,
					   hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);
		iir = I915_READ(VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		I915_WRITE(VLV_MASTER_IER, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		I915_WRITE(VLV_IER, ier);
		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

		if (gt_iir)
			gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 master_ctl, iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		I915_WRITE(GEN8_MASTER_IRQ, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		gen8_gt_irq_handler(&dev_priv->gt, master_ctl);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		I915_WRITE(VLV_IER, ier);
		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

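/*
 * Latch and clear the PCH digital hotplug register, then translate the
 * trigger bits into HPD pin events (short vs long pulse) for the hotplug
 * machinery.
 */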
static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/*
	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
	 * unless we touch the hotplug register, even if hotplug_trigger is
	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
	 * errors.
	 */
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	if (!hotplug_trigger) {
		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
			PORTD_HOTPLUG_STATUS_MASK |
			PORTC_HOTPLUG_STATUS_MASK |
			PORTB_HOTPLUG_STATUS_MASK;
		dig_hotplug_reg &= ~mask;
	}

	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
	if (!hotplug_trigger)
		return;

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   pch_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
			port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		drm_err(&dev_priv->drm, "PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK) {
		for_each_pipe(dev_priv, pipe)
			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
				pipe_name(pipe),
				I915_READ(FDI_RX_IIR(pipe)));
	}

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		drm_dbg(&dev_priv->drm,
			"PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
}

static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		drm_err(&dev_priv->drm, "Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev_priv))
				ivb_pipe_crc_irq_handler(dev_priv, pipe);
			else
				hsw_pipe_crc_irq_handler(dev_priv, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
	u32 serr_int = I915_READ(SERR_INT);
	enum pipe pipe;

	if (serr_int & SERR_INT_POISON)
		drm_err(&dev_priv->drm, "PCH poison interrupt\n");

	for_each_pipe(dev_priv, pipe)
		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);

	I915_WRITE(SERR_INT, serr_int);
}

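/* CPT/PPT south display interrupts: same flow as ibx above, CPT bit layout. */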
static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
			port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT) {
		for_each_pipe(dev_priv, pipe)
			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
				pipe_name(pipe),
				I915_READ(FDI_RX_IIR(pipe)));
	}

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev_priv);
}

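/*
 * ICP+ PCHs split hotplug across two registers: combo PHY (DDI) ports in
 * SHOTPLUG_CTL_DDI and Type-C ports in SHOTPLUG_CTL_TC. Which masks, pin
 * table and long-pulse detector apply depends on the exact PCH variant.
 */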
static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 ddi_hotplug_trigger, tc_hotplug_trigger;
	u32 pin_mask = 0, long_mask = 0;
	bool (*tc_port_hotplug_long_detect)(enum hpd_pin pin, u32 val);
	const u32 *pins;

	if (HAS_PCH_TGP(dev_priv)) {
		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
		tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP;
		tc_port_hotplug_long_detect = tgp_tc_port_hotplug_long_detect;
		pins = hpd_tgp;
	} else if (HAS_PCH_JSP(dev_priv)) {
		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
		tc_hotplug_trigger = 0;
		pins = hpd_tgp;
	} else if (HAS_PCH_MCC(dev_priv)) {
		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
		tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_ICP(PORT_TC1);
		tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
		pins = hpd_icp;
	} else {
		drm_WARN(&dev_priv->drm, !HAS_PCH_ICP(dev_priv),
			 "Unrecognized PCH type 0x%x\n",
			 INTEL_PCH_TYPE(dev_priv));

		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
		tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
		tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
		pins = hpd_icp;
	}

	if (ddi_hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
		I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   ddi_hotplug_trigger,
				   dig_hotplug_reg, pins,
				   icp_ddi_port_hotplug_long_detect);
	}

	if (tc_hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
		I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   tc_hotplug_trigger,
				   dig_hotplug_reg, pins,
				   tc_port_hotplug_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_ICP)
		gmbus_irq_handler(dev_priv);
}

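/*
 * SPT+ keeps port E hotplug status in PCH_PORT_HOTPLUG2, so ports A-D and
 * port E are acked and decoded separately before one combined HPD call.
 */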
static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
	u32 pin_mask = 0, long_mask = 0;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug_trigger, dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug_long_detect);
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug2_trigger, dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug2_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);
}

static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   ilk_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev_priv);

	if (de_iir & DE_POISON)
		drm_err(&dev_priv->drm, "Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			intel_handle_vblank(dev_priv, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev_priv))
			cpt_irq_handler(dev_priv, pch_iir);
		else
			ibx_irq_handler(dev_priv, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
		gen5_rps_irq_handler(&dev_priv->gt.rps);
}

static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev_priv);

	if (de_iir & DE_EDP_PSR_INT_HSW) {
		u32 psr_iir = I915_READ(EDP_PSR_IIR);

		intel_psr_irq_handler(dev_priv, psr_iir);
		I915_WRITE(EDP_PSR_IIR, psr_iir);
	}

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev_priv);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
			intel_handle_vblank(dev_priv, pipe);
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev_priv, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev_priv)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
		if (INTEL_GEN(dev_priv) >= 6)
			gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
		else
			gen5_gt_irq_handler(&dev_priv->gt, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
		if (INTEL_GEN(dev_priv) >= 7)
			ivb_display_irq_handler(dev_priv, de_iir);
		else
			ilk_display_irq_handler(dev_priv, de_iir);
	}

	if (INTEL_GEN(dev_priv) >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
		}
	}

	I915_WRITE(DEIER, de_ier);
	if (!HAS_PCH_NOP(dev_priv))
		I915_WRITE(SDEIER, sde_ier);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   bxt_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

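/*
 * Gen11+ DE HPD: Type-C and Thunderbolt triggers are latched in separate
 * HOTPLUG_CTL registers but share one pin table and long-pulse detector.
 */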
static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
	u32 pin_mask = 0, long_mask = 0;
	u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
	u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
	long_pulse_detect_func long_pulse_detect;
	const u32 *hpd;

	if (INTEL_GEN(dev_priv) >= 12) {
		long_pulse_detect = gen12_port_hotplug_long_detect;
		hpd = hpd_gen12;
	} else {
		long_pulse_detect = gen11_port_hotplug_long_detect;
		hpd = hpd_gen11;
	}

	if (trigger_tc) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
		I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
				   dig_hotplug_reg, hpd, long_pulse_detect);
	}

	if (trigger_tbt) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
		I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
				   dig_hotplug_reg, hpd, long_pulse_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
	else
		drm_err(&dev_priv->drm,
			"Unexpected DE HPD interrupt 0x%08x\n", iir);
}

static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
{
	u32 mask;

	if (INTEL_GEN(dev_priv) >= 12)
		return TGL_DE_PORT_AUX_DDIA |
			TGL_DE_PORT_AUX_DDIB |
			TGL_DE_PORT_AUX_DDIC |
			TGL_DE_PORT_AUX_USBC1 |
			TGL_DE_PORT_AUX_USBC2 |
			TGL_DE_PORT_AUX_USBC3 |
			TGL_DE_PORT_AUX_USBC4 |
			TGL_DE_PORT_AUX_USBC5 |
			TGL_DE_PORT_AUX_USBC6;

	mask = GEN8_AUX_CHANNEL_A;
	if (INTEL_GEN(dev_priv) >= 9)
		mask |= GEN9_AUX_CHANNEL_B |
			GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;

	if (IS_CNL_WITH_PORT_F(dev_priv) || IS_GEN(dev_priv, 11))
		mask |= CNL_AUX_CHANNEL_F;

	if (IS_GEN(dev_priv, 11))
		mask |= ICL_AUX_CHANNEL_E;

	return mask;
}

static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 11)
		return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
	else if (INTEL_GEN(dev_priv) >= 9)
		return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
	else
		return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
}

static void
gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
	bool found = false;

	if (iir & GEN8_DE_MISC_GSE) {
		intel_opregion_asle_intr(dev_priv);
		found = true;
	}

	if (iir & GEN8_DE_EDP_PSR) {
		u32 psr_iir;
		i915_reg_t iir_reg;

		if (INTEL_GEN(dev_priv) >= 12)
			iir_reg = TRANS_PSR_IIR(dev_priv->psr.transcoder);
		else
			iir_reg = EDP_PSR_IIR;

		psr_iir = I915_READ(iir_reg);
		I915_WRITE(iir_reg, psr_iir);

		if (psr_iir)
			found = true;

		intel_psr_irq_handler(dev_priv, psr_iir);
	}

	if (!found)
		drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
}

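/*
 * Display engine dispatch: the caller has already disabled the master
 * control, so walk the MISC/HPD/PORT/per-pipe/PCH IIRs indicated in
 * master_ctl, clearing each before handling it.
 */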
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;
	u32 iir;
	enum pipe pipe;

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = I915_READ(GEN8_DE_MISC_IIR);
		if (iir) {
			I915_WRITE(GEN8_DE_MISC_IIR, iir);
			ret = IRQ_HANDLED;
			gen8_de_misc_irq_handler(dev_priv, iir);
		} else {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE MISC)!\n");
		}
	}

	if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
		iir = I915_READ(GEN11_DE_HPD_IIR);
		if (iir) {
			I915_WRITE(GEN11_DE_HPD_IIR, iir);
			ret = IRQ_HANDLED;
			gen11_hpd_irq_handler(dev_priv, iir);
		} else {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied, (DE HPD)!\n");
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = I915_READ(GEN8_DE_PORT_IIR);
		if (iir) {
			u32 tmp_mask;
			bool found = false;

			I915_WRITE(GEN8_DE_PORT_IIR, iir);
			ret = IRQ_HANDLED;

			if (iir & gen8_de_port_aux_mask(dev_priv)) {
				dp_aux_irq_handler(dev_priv);
				found = true;
			}

			if (IS_GEN9_LP(dev_priv)) {
				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
				if (tmp_mask) {
					bxt_hpd_irq_handler(dev_priv, tmp_mask,
							    hpd_bxt);
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
				if (tmp_mask) {
					ilk_hpd_irq_handler(dev_priv,
							    tmp_mask, hpd_bdw);
					found = true;
				}
			}

			if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
				gmbus_irq_handler(dev_priv);
				found = true;
			}

			if (!found)
				drm_err(&dev_priv->drm,
					"Unexpected DE Port interrupt\n");
		}
		else
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE PORT)!\n");
	}

	for_each_pipe(dev_priv, pipe) {
		u32 fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE PIPE)!\n");
			continue;
		}

		ret = IRQ_HANDLED;
		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK)
			intel_handle_vblank(dev_priv, pipe);

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev_priv, pipe);

		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
		if (fault_errors)
			drm_err(&dev_priv->drm,
				"Fault errors on pipe %c: 0x%08x\n",
				pipe_name(pipe),
				fault_errors);
	}

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		iir = I915_READ(SDEIIR);
		if (iir) {
			I915_WRITE(SDEIIR, iir);
			ret = IRQ_HANDLED;

			if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
				icp_irq_handler(dev_priv, iir);
			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
				spt_irq_handler(dev_priv, iir);
			else
				cpt_irq_handler(dev_priv, iir);
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			drm_dbg(&dev_priv->drm,
				"The master control interrupt lied (SDE)!\n");
		}
	}

	return ret;
}

static inline u32 gen8_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN8_MASTER_IRQ);
}

static inline void gen8_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
}

static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	void __iomem * const regs = dev_priv->uncore.regs;
	u32 master_ctl;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	master_ctl = gen8_master_intr_disable(regs);
	if (!master_ctl) {
		gen8_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen8_gt_irq_handler(&dev_priv->gt, master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & ~GEN8_GT_IRQS) {
		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
		gen8_de_irq_handler(dev_priv, master_ctl);
		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
	}

	gen8_master_intr_enable(regs);

	return IRQ_HANDLED;
}

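/* GU MISC is acked early (under master disable) but handled after re-enable. */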
static u32
gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 iir;

	if (!(master_ctl & GEN11_GU_MISC_IRQ))
		return 0;

	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
	if (likely(iir))
		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);

	return iir;
}

static void
gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
{
	if (iir & GEN11_GU_MISC_GSE)
		intel_opregion_asle_intr(gt->i915);
}

static inline u32 gen11_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
}

static inline void gen11_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}

static void
gen11_display_irq_handler(struct drm_i915_private *i915)
{
	void __iomem * const regs = i915->uncore.regs;
	const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);

	disable_rpm_wakeref_asserts(&i915->runtime_pm);
	/*
	 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
	 * for the display related bits.
	 */
	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
	gen8_de_irq_handler(i915, disp_ctl);
	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
		      GEN11_DISPLAY_IRQ_ENABLE);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);
}

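/*
 * Common gen11+ top half, parameterized on the master disable/enable
 * callbacks so that variants with a different master IRQ register can
 * reuse the same flow.
 */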
static __always_inline irqreturn_t
__gen11_irq_handler(struct drm_i915_private * const i915,
		    u32 (*intr_disable)(void __iomem * const regs),
		    void (*intr_enable)(void __iomem * const regs))
{
	void __iomem * const regs = i915->uncore.regs;
	struct intel_gt *gt = &i915->gt;
	u32 master_ctl;
	u32 gu_misc_iir;

	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_ctl = intr_disable(regs);
	if (!master_ctl) {
		intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen11_gt_irq_handler(gt, master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & GEN11_DISPLAY_IRQ)
		gen11_display_irq_handler(i915);

	gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);

	intr_enable(regs);

	gen11_gu_misc_irq_handler(gt, gu_misc_iir);

	return IRQ_HANDLED;
}

static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
	return __gen11_irq_handler(arg,
				   gen11_master_intr_disable,
				   gen11_master_intr_enable);
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
int i8xx_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

int i915gm_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);

	/*
	 * Vblank interrupts fail to wake the device up from C2+.
	 * Disabling render clock gating during C-states avoids
	 * the problem. There is a small power cost so we do this
	 * only when vblank interrupts are actually enabled.
	 */
	if (dev_priv->vblank_enabled++ == 0)
		I915_WRITE(SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));

	return i8xx_enable_vblank(crtc);
}

int i965_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

int ilk_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* Even though there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated.
	 */
	if (HAS_PSR(dev_priv))
		drm_crtc_vblank_restore(crtc);

	return 0;
}

int bdw_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* Even if there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated, so check only for PSR.
	 */
	if (HAS_PSR(dev_priv))
		drm_crtc_vblank_restore(crtc);

	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
void i8xx_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

void i915gm_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);

	i8xx_disable_vblank(crtc);

	if (--dev_priv->vblank_enabled == 0)
		I915_WRITE(SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}

void i965_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

void ilk_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

void bdw_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (HAS_PCH_NOP(dev_priv))
		return;

	GEN3_IRQ_RESET(uncore, SDE);

	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		I915_WRITE(SERR_INT, 0xffffffff);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * This function needs to be called before interrupts are enabled.
 */
static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_NOP(dev_priv))
		return;

	drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}

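/*
 * Quiesce VLV/CHV display interrupts: clear DPINVGTT status, drop all
 * hotplug and pipestat enables, reset the VLV_ IMR/IER/IIR triplet, and
 * mark every display interrupt masked (irq_mask = ~0u).
 */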
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (IS_CHERRYVIEW(dev_priv))
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);

	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
	intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, VLV_);
	dev_priv->irq_mask = ~0u;
}

static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 pipestat_mask;
	u32 enable_mask;
	enum pipe pipe;

	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_LPE_PIPE_A_INTERRUPT |
		I915_LPE_PIPE_B_INTERRUPT;

	if (IS_CHERRYVIEW(dev_priv))
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
			I915_LPE_PIPE_C_INTERRUPT;

	drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);

	dev_priv->irq_mask = ~enable_mask;

	GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
}

/* drm_dma.h hooks
*/
static void ilk_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	GEN3_IRQ_RESET(uncore, DE);
	if (IS_GEN(dev_priv, 7))
		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);

	if (IS_HASWELL(dev_priv)) {
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}

	gen5_gt_irq_reset(&dev_priv->gt);

	ibx_irq_reset(dev_priv);
}

static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
	I915_WRITE(VLV_MASTER_IER, 0);
	POSTING_READ(VLV_MASTER_IER);

	gen5_gt_irq_reset(&dev_priv->gt);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;

	gen8_master_intr_disable(dev_priv->uncore.regs);

	gen8_gt_irq_reset(&dev_priv->gt);

	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_reset(dev_priv);
}

static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;

	intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);

	if (INTEL_GEN(dev_priv) >= 12) {
		enum transcoder trans;

		for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
			intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
		}
	} else {
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
	GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		GEN3_IRQ_RESET(uncore, SDE);
}

static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	gen11_master_intr_disable(dev_priv->uncore.regs);

	gen11_gt_irq_reset(&dev_priv->gt);
	gen11_display_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
}

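/*
 * The DE_PIPE interrupt registers sit in the per-pipe power wells and lose
 * their state on power-down, so they are re-programmed whenever a well
 * comes back up (and reset again before it goes down).
 */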
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
				  dev_priv->de_irq_mask[pipe],
				  ~dev_priv->de_irq_mask[pipe] | extra_ier);

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);
}

static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(&dev_priv->gt);

	GEN3_IRQ_RESET(uncore, GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

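/* Collect the trigger bits for every connector whose HPD pin is enabled. */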
static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}

static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on LPT+.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTB_PULSE_DURATION_MASK |
		     PORTC_PULSE_DURATION_MASK |
		     PORTD_PULSE_DURATION_MASK);
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	/*
	 * When CPU and PCH are on the same package, port A
	 * HPD must be enabled in both north and south.
	 */
	if (HAS_PCH_LPT_LP(dev_priv))
		hotplug |= PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	if (HAS_PCH_IBX(dev_priv)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	ibx_hpd_detection_setup(dev_priv);
}

static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv,
				    u32 ddi_hotplug_enable_mask,
				    u32 tc_hotplug_enable_mask)
{
	u32 hotplug;

	hotplug = I915_READ(SHOTPLUG_CTL_DDI);
	hotplug |= ddi_hotplug_enable_mask;
	I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);

	if (tc_hotplug_enable_mask) {
		hotplug = I915_READ(SHOTPLUG_CTL_TC);
		hotplug |= tc_hotplug_enable_mask;
		I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
	}
}

static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv,
			      u32 sde_ddi_mask, u32 sde_tc_mask,
			      u32 ddi_enable_mask, u32 tc_enable_mask,
			      const u32 *pins)
{
	u32 hotplug_irqs, enabled_irqs;

	hotplug_irqs = sde_ddi_mask | sde_tc_mask;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, pins);

	I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	icp_hpd_detection_setup(dev_priv, ddi_enable_mask, tc_enable_mask);
}

/*
 * EHL doesn't need most of gen11_hpd_irq_setup, it's handling only the
 * equivalent of SDE.
 */
static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	icp_hpd_irq_setup(dev_priv,
			  SDE_DDI_MASK_ICP, SDE_TC_HOTPLUG_ICP(PORT_TC1),
			  ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE(PORT_TC1),
			  hpd_icp);
}

/*
 * JSP behaves exactly the same as MCC above except that port C is mapped to
 * the DDI-C pins instead of the TC1 pins.  This means we should follow TGP's
 * masks & tables rather than ICP's masks & tables.
 */
static void jsp_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	icp_hpd_irq_setup(dev_priv,
			  SDE_DDI_MASK_TGP, 0,
			  TGP_DDI_HPD_ENABLE_MASK, 0,
			  hpd_tgp);
}

static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
	I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);

	hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
	I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
}

static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;
	const u32 *hpd;
	u32 val;

	hpd = INTEL_GEN(dev_priv) >= 12 ? hpd_gen12 : hpd_gen11;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd);
	hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;

	val = I915_READ(GEN11_DE_HPD_IMR);
	val &= ~hotplug_irqs;
	I915_WRITE(GEN11_DE_HPD_IMR, val);
	POSTING_READ(GEN11_DE_HPD_IMR);

	gen11_hpd_detection_setup(dev_priv);

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
		icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_TGP, SDE_TC_MASK_TGP,
				  TGP_DDI_HPD_ENABLE_MASK,
				  TGP_TC_HPD_ENABLE_MASK, hpd_tgp);
	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_ICP, SDE_TC_MASK_ICP,
				  ICP_DDI_HPD_ENABLE_MASK,
				  ICP_TC_HPD_ENABLE_MASK, hpd_icp);
}

static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 val, hotplug;

	/* Display WA #1179 WaHardHangonHotPlug: cnp */
	if (HAS_PCH_CNP(dev_priv)) {
		val = I915_READ(SOUTH_CHICKEN1);
		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
		val |= CHASSIS_CLK_REQ_DURATION(0xf);
		I915_WRITE(SOUTH_CHICKEN1, val);
	}

	/* Enable digital hotplug on the PCH */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE |
		   PORTD_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);

	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
	hotplug |= PORTE_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}

static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);

	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	spt_hpd_detection_setup(dev_priv);
}

static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 * The pulse duration bits are reserved on HSW+.
	 */
	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
		   DIGITAL_PORTA_PULSE_DURATION_2ms;
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
}

static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	if (INTEL_GEN(dev_priv) >= 8) {
		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);

		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else if (INTEL_GEN(dev_priv) >= 7) {
		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else {
		hotplug_irqs = DE_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	}

	ilk_hpd_detection_setup(dev_priv);

	ibx_hpd_irq_setup(dev_priv);
}

static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
				      u32 enabled_irqs)
{
	u32 hotplug;

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE;

	drm_dbg_kms(&dev_priv->drm,
		    "Invert bit setting: hp_ctl:%x hp_port:%x\n",
		    hotplug, enabled_irqs);
	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;

	/*
	 * For BXT invert bit has to be set based on AOB design
	 * for HPD detection logic, update it based on VBT fields.
	 */
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
		hotplug |= BXT_DDIA_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
		hotplug |= BXT_DDIB_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
		hotplug |= BXT_DDIC_HPD_INVERT;

	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	__bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
}

static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;

	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);

	__bxt_hpd_detection_setup(dev_priv, enabled_irqs);
}

static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	u32 mask;

	if (HAS_PCH_NOP(dev_priv))
		return;

	if (HAS_PCH_IBX(dev_priv))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
	else
		mask = SDE_GMBUS_CPT;

	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);

	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
	    HAS_PCH_LPT(dev_priv))
		ibx_hpd_detection_setup(dev_priv);
	else
		spt_hpd_detection_setup(dev_priv);
}

static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 display_mask, extra_mask;

	if (INTEL_GEN(dev_priv) >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_DP_A_HOTPLUG);
	}

	if (IS_HASWELL(dev_priv)) {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
		display_mask |= DE_EDP_PSR_INT_HSW;
	}

	dev_priv->irq_mask = ~display_mask;

	ibx_irq_pre_postinstall(dev_priv);

	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
		      display_mask | extra_mask);

	gen5_gt_irq_postinstall(&dev_priv->gt);

	ilk_hpd_detection_setup(dev_priv);

	ibx_irq_postinstall(dev_priv);

	if (IS_IRONLAKE_M(dev_priv)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irq(&dev_priv->irq_lock);
		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}
}

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv)) {
		vlv_display_irq_reset(dev_priv);
		vlv_display_irq_postinstall(dev_priv);
	}
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		vlv_display_irq_reset(dev_priv);
}

static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen5_gt_irq_postinstall(&dev_priv->gt);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(VLV_MASTER_IER);
}

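/*
 * Build the display engine enable/mask sets for gen8+: per-pipe faults,
 * vblank and underrun bits, AUX channels and hotplug vary with gen and
 * platform, and pipe IER/IMR are only written for powered-up pipes.
 */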
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	u32 de_pipe_enables;
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
	enum pipe pipe;

	if (INTEL_GEN(dev_priv) <= 10)
		de_misc_masked |= GEN8_DE_MISC_GSE;

	if (INTEL_GEN(dev_priv) >= 9) {
		de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
				  GEN9_AUX_CHANNEL_D;
		if (IS_GEN9_LP(dev_priv))
			de_port_masked |= BXT_DE_PORT_GMBUS;
	} else {
		de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	}

	if (INTEL_GEN(dev_priv) >= 11)
		de_port_masked |= ICL_AUX_CHANNEL_E;

	if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
		de_port_masked |= CNL_AUX_CHANNEL_F;

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

	de_port_enables = de_port_masked;
	if (IS_GEN9_LP(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;

	if (INTEL_GEN(dev_priv) >= 12) {
		enum transcoder trans;

		for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
		}
	} else {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
	}

	for_each_pipe(dev_priv, pipe) {
		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;

		if (intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);
	}

	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);

	if (INTEL_GEN(dev_priv) >= 11) {
		u32 de_hpd_masked = 0;
		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
				     GEN11_DE_TBT_HOTPLUG_MASK;

		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
			      de_hpd_enables);
		gen11_hpd_detection_setup(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		bxt_hpd_detection_setup(dev_priv);
	} else if (IS_BROADWELL(dev_priv)) {
		ilk_hpd_detection_setup(dev_priv);
	}
}
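
/*
 * Pipes whose power domain is off are skipped by the per-pipe loop above;
 * their DE_PIPE IRQ registers are expected to be (re)initialized once the
 * power well comes back up (see gen8_irq_power_well_post_enable()), with
 * the same per-pipe call repeated there (illustrative):
 *
 *	GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
 *			  dev_priv->de_irq_mask[pipe],
 *			  de_pipe_enables);
 */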

static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_pre_postinstall(dev_priv);

	gen8_gt_irq_postinstall(&dev_priv->gt);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev_priv);

	gen8_master_intr_enable(dev_priv->uncore.regs);
}

static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
	u32 mask = SDE_GMBUS_ICP;

	drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);

	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);

	if (HAS_PCH_TGP(dev_priv))
		icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK,
					TGP_TC_HPD_ENABLE_MASK);
	else if (HAS_PCH_JSP(dev_priv))
		icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0);
	else if (HAS_PCH_MCC(dev_priv))
		icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
					ICP_TC_HPD_ENABLE(PORT_TC1));
	else
		icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
					ICP_TC_HPD_ENABLE_MASK);
}

static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);

	gen11_gt_irq_postinstall(&dev_priv->gt);
	gen8_de_irq_postinstall(dev_priv);

	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);

	gen11_master_intr_enable(uncore->regs);
	POSTING_READ(GEN11_GFX_MSTR_IRQ);
}

static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen8_gt_irq_postinstall(&dev_priv->gt);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);
}

static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i9xx_pipestat_irq_reset(dev_priv);

	GEN2_IRQ_RESET(uncore);
}

static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u16 enable_mask;

	intel_uncore_write16(uncore,
			     EMR,
			     ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void i8xx_error_irq_ack(struct drm_i915_private *i915,
			       u16 *eir, u16 *eir_stuck)
{
	struct intel_uncore *uncore = &i915->uncore;
	u16 emr;

	*eir = intel_uncore_read16(uncore, EIR);

	if (*eir)
		intel_uncore_write16(uncore, EIR, *eir);

	*eir_stuck = intel_uncore_read16(uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_read16(uncore, EMR);
	intel_uncore_write16(uncore, EMR, 0xffff);
	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
}
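
/*
 * Worked example of the EMR toggle above, with illustrative values:
 * suppose EIR still reads 0x0010 after the clearing write, i.e. that
 * error bit is stuck. Writing EMR = 0xffff masks every error source,
 * pulling the ISR master error bit low so a fresh edge can be generated,
 * and the final write of EMR = emr | 0x0010 leaves only the stuck bit
 * masked, so it can no longer wedge the edge-triggered IIR.
 */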

static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u16 eir, u16 eir_stuck)
{
	DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
			eir_stuck);
}

static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	*eir = I915_READ(EIR);

	I915_WRITE(EIR, *eir);

	*eir_stuck = I915_READ(EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = I915_READ(EMR);
	I915_WRITE(EMR, 0xffffffff);
	I915_WRITE(EMR, emr | *eir_stuck);
}

static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u32 eir, u32 eir_stuck)
{
	DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
			eir_stuck);
}

static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u16 eir = 0, eir_stuck = 0;
		u16 iir;

		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);

		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
}

static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
			  I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		I915_WRITE(GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
}

static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;
	u32 error_mask;

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}

static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/* Programming the CRT detection parameters tends
	   to generate a spurious hotplug event about three
	   seconds later.  So just do it once.
	*/
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		I915_WRITE(GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);

		if (iir & I915_BSD_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->engine[VCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not setup the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int i;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	/* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
	if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11)
		dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;

	dev->vblank_disable_immediate = true;

	/* Most platforms treat the display irq block as an always-on
	 * power domain. vlv/chv can disable it at runtime and need
	 * special care to avoid writing any of the display block registers
	 * outside of the power domain. We defer setting up the display irqs
	 * in this case to the runtime pm.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	/* If we have MST support, we want to avoid doing short HPD IRQ storm
	 * detection, as short HPD storms will occur as a natural part of
	 * sideband messaging with MST.
	 * On older platforms however, IRQ storms can occur with both long and
	 * short pulses, as seen on some G4x systems.
	 */
	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);

	if (HAS_GMCH(dev_priv)) {
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else {
		if (HAS_PCH_JSP(dev_priv))
			dev_priv->display.hpd_irq_setup = jsp_hpd_irq_setup;
		else if (HAS_PCH_MCC(dev_priv))
			dev_priv->display.hpd_irq_setup = mcc_hpd_irq_setup;
		else if (INTEL_GEN(dev_priv) >= 11)
			dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
		else if (IS_GEN9_LP(dev_priv))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	}
}
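
/*
 * The hpd_irq_setup hook assigned above is not called here; it is invoked
 * later from the hotplug code, roughly like this (illustrative sketch of
 * the caller in intel_hotplug.c, run under the irq_lock):
 *
 *	if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
 *		dev_priv->display.hpd_irq_setup(dev_priv);
 */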

/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}

static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			return cherryview_irq_handler;
		else if (IS_VALLEYVIEW(dev_priv))
			return valleyview_irq_handler;
		else if (IS_GEN(dev_priv, 4))
			return i965_irq_handler;
		else if (IS_GEN(dev_priv, 3))
			return i915_irq_handler;
		else
			return i8xx_irq_handler;
	} else {
		if (INTEL_GEN(dev_priv) >= 11)
			return gen11_irq_handler;
		else if (INTEL_GEN(dev_priv) >= 8)
			return gen8_irq_handler;
		else
			return ilk_irq_handler;
	}
}

static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_reset(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_reset(dev_priv);
		else if (IS_GEN(dev_priv, 4))
			i965_irq_reset(dev_priv);
		else if (IS_GEN(dev_priv, 3))
			i915_irq_reset(dev_priv);
		else
			i8xx_irq_reset(dev_priv);
	} else {
		if (INTEL_GEN(dev_priv) >= 11)
			gen11_irq_reset(dev_priv);
		else if (INTEL_GEN(dev_priv) >= 8)
			gen8_irq_reset(dev_priv);
		else
			ilk_irq_reset(dev_priv);
	}
}

static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_postinstall(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_postinstall(dev_priv);
		else if (IS_GEN(dev_priv, 4))
			i965_irq_postinstall(dev_priv);
		else if (IS_GEN(dev_priv, 3))
			i915_irq_postinstall(dev_priv);
		else
			i8xx_irq_postinstall(dev_priv);
	} else {
		if (INTEL_GEN(dev_priv) >= 11)
			gen11_irq_postinstall(dev_priv);
		else if (INTEL_GEN(dev_priv) >= 8)
			gen8_irq_postinstall(dev_priv);
		else
			ilk_irq_postinstall(dev_priv);
	}
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	int irq = dev_priv->drm.pdev->irq;
	int ret;

	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	dev_priv->drm.irq_enabled = true;

	intel_irq_reset(dev_priv);

	ret = request_irq(irq, intel_irq_handler(dev_priv),
			  IRQF_SHARED, DRIVER_NAME, dev_priv);
	if (ret < 0) {
		dev_priv->drm.irq_enabled = false;
		return ret;
	}

	intel_irq_postinstall(dev_priv);

	return ret;
}
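
/*
 * Illustrative load-time ordering only (the real sequence lives in the
 * driver probe/remove paths; names here are assumed):
 *
 *	intel_irq_init(i915);		// work items + vtables, no HW access
 *	ret = intel_irq_install(i915);	// reset HW, request_irq(), postinstall
 *	if (ret)
 *		goto err;
 *	...
 *	intel_irq_uninstall(i915);	// reset HW, free_irq(), cancel hotplug
 */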

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	int irq = dev_priv->drm.pdev->irq;

	/*
	 * FIXME we can get called twice during driver probe
	 * error handling as well as during driver remove due to
	 * intel_modeset_driver_remove() calling us out of sequence.
	 * Would be nice if it didn't do that...
	 */
	if (!dev_priv->drm.irq_enabled)
		return;

	dev_priv->drm.irq_enabled = false;

	intel_irq_reset(dev_priv);

	free_irq(irq, dev_priv);

	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	intel_irq_reset(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
	intel_synchronize_irq(dev_priv);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->runtime_pm.irqs_enabled = true;
	intel_irq_reset(dev_priv);
	intel_irq_postinstall(dev_priv);
}
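
/*
 * These two are expected to be paired across a suspend/resume or runtime
 * pm cycle (illustrative only; the actual callers live in the pm code):
 *
 *	intel_runtime_pm_disable_interrupts(i915);
 *	... device powered down and brought back up ...
 *	intel_runtime_pm_enable_interrupts(i915);
 *
 * Note the enable side performs a full reset + postinstall rather than
 * trying to restore the previous mask state piecemeal.
 */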

bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	/*
	 * We only use drm_irq_uninstall() at unload and VT switch, so
	 * this is the only thing we need to check.
	 */
	return dev_priv->runtime_pm.irqs_enabled;
}

void intel_synchronize_irq(struct drm_i915_private *i915)
{
	synchronize_irq(i915->drm.pdev->irq);
}