i915_irq.c 121.2 KB
Newer Older
D
Dave Airlie 已提交
1
/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
L
Linus Torvalds 已提交
2
 */
D
Dave Airlie 已提交
3
/*
L
Linus Torvalds 已提交
4 5
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
D
Dave Airlie 已提交
27
 */
L
Linus Torvalds 已提交
28

29 30
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

31
#include <linux/circ_buf.h>
32 33 34
#include <linux/slab.h>
#include <linux/sysrq.h>

35
#include <drm/drm_drv.h>
36 37
#include <drm/drm_irq.h>

38
#include "display/intel_display_types.h"
39 40 41 42 43
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"

44
#include "gt/intel_breadcrumbs.h"
45
#include "gt/intel_gt.h"
46
#include "gt/intel_gt_irq.h"
47
#include "gt/intel_gt_pm_irq.h"
48
#include "gt/intel_rps.h"
49

L
Linus Torvalds 已提交
50
#include "i915_drv.h"
51
#include "i915_irq.h"
C
Chris Wilson 已提交
52
#include "i915_trace.h"
53
#include "intel_pm.h"
L
Linus Torvalds 已提交
54

55 56 57 58 59 60 61 62
/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

63
/* Per-platform callback types used by the hotplug IRQ code. */
typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
typedef u32 (*hotplug_enables_func)(struct drm_i915_private *i915,
				    enum hpd_pin pin);

/*
 * Hotplug interrupt bit tables, indexed by hpd_pin. Each table maps a HPD
 * pin to the interrupt bit(s) used by a given display/PCH generation; pins
 * without an entry have no hotplug support on that platform.
 */

/* ILK/SNB north display: only DP A hotplug lives in DE IIR. */
static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

/* IVB/HSW variant of the DP A hotplug bit. */
static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

/* BDW+ DE port interrupt register. */
static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
};

/* IBX south display (SDEIIR) hotplug bits. */
static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG,
};

/* CPT/LPT south display hotplug bits. */
static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
};

/* SPT/CNP south display: adds ports A and E on top of the CPT bits. */
static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
};

/* GMCH platforms: PORT_HOTPLUG_EN enable bits. */
static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
};

/* G4X/VLV/CHV: PORT_HOTPLUG_STAT status bits (SDVO bits differ from i915). */
static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

/* Pre-G4X GMCH: PORT_HOTPLUG_STAT status bits. */
static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

/* BXT/GLK DE port hotplug bits. */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
	[HPD_PORT_B] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_B),
	[HPD_PORT_C] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_C),
};

/* ICL+ Type-C pins: each pin combines its TC and TBT hotplug bits. */
static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_TC1] = GEN11_TC_HOTPLUG(HPD_PORT_TC1) | GEN11_TBT_HOTPLUG(HPD_PORT_TC1),
	[HPD_PORT_TC2] = GEN11_TC_HOTPLUG(HPD_PORT_TC2) | GEN11_TBT_HOTPLUG(HPD_PORT_TC2),
	[HPD_PORT_TC3] = GEN11_TC_HOTPLUG(HPD_PORT_TC3) | GEN11_TBT_HOTPLUG(HPD_PORT_TC3),
	[HPD_PORT_TC4] = GEN11_TC_HOTPLUG(HPD_PORT_TC4) | GEN11_TBT_HOTPLUG(HPD_PORT_TC4),
	[HPD_PORT_TC5] = GEN11_TC_HOTPLUG(HPD_PORT_TC5) | GEN11_TBT_HOTPLUG(HPD_PORT_TC5),
	[HPD_PORT_TC6] = GEN11_TC_HOTPLUG(HPD_PORT_TC6) | GEN11_TBT_HOTPLUG(HPD_PORT_TC6),
};

/* ICP/TGP south display: combo DDI pins plus Type-C pins. */
static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
	[HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1),
	[HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2),
	[HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3),
	[HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
	[HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5),
	[HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6),
};

/* DG1 south display: four DDI pins, no Type-C. */
static const u32 hpd_sde_dg1[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
	[HPD_PORT_D] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_D),
};

164 165 166 167 168 169 170 171 172 173 174 175 176
static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
{
	struct i915_hotplug *hpd = &dev_priv->hotplug;

	if (HAS_GMCH(dev_priv)) {
		if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv))
			hpd->hpd = hpd_status_g4x;
		else
			hpd->hpd = hpd_status_i915;
		return;
	}

177
	if (INTEL_GEN(dev_priv) >= 11)
178 179 180 181 182 183 184 185 186 187
		hpd->hpd = hpd_gen11;
	else if (IS_GEN9_LP(dev_priv))
		hpd->hpd = hpd_bxt;
	else if (INTEL_GEN(dev_priv) >= 8)
		hpd->hpd = hpd_bdw;
	else if (INTEL_GEN(dev_priv) >= 7)
		hpd->hpd = hpd_ivb;
	else
		hpd->hpd = hpd_ilk;

188 189
	if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) &&
	    (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
190 191
		return;

192 193 194 195
	if (HAS_PCH_DG1(dev_priv))
		hpd->pch_hpd = hpd_sde_dg1;
	else if (HAS_PCH_TGP(dev_priv) || HAS_PCH_JSP(dev_priv) ||
		 HAS_PCH_ICP(dev_priv) || HAS_PCH_MCC(dev_priv))
196 197 198 199 200 201 202 203 204 205 206
		hpd->pch_hpd = hpd_icp;
	else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
		hpd->pch_hpd = hpd_spt;
	else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
		hpd->pch_hpd = hpd_cpt;
	else if (HAS_PCH_IBX(dev_priv))
		hpd->pch_hpd = hpd_ibx;
	else
		MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
}

207 208 209 210 211 212 213 214
/* Forward a pipe's vblank event to the DRM core's vblank machinery. */
static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	drm_crtc_handle_vblank(&intel_get_crtc_for_pipe(dev_priv, pipe)->base);
}

215 216
/*
 * gen3_irq_reset - quiesce a gen3+ style IMR/IER/IIR register triplet.
 * @uncore: MMIO access helper
 * @imr/@iir/@ier: the three registers of one interrupt unit
 *
 * Masks all sources, disables delivery, and drains any latched events.
 * The posting reads flush each write before the next step.
 */
void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
		    i915_reg_t iir, i915_reg_t ier)
{
	/* Mask everything first so no new events are raised. */
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	intel_uncore_write(uncore, ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}

230
/*
 * gen2_irq_reset - quiesce the gen2 16-bit IMR/IER/IIR registers.
 * @uncore: MMIO access helper
 *
 * Same sequence as gen3_irq_reset(), but gen2 has a single, fixed set of
 * 16-bit interrupt registers.
 */
void gen2_irq_reset(struct intel_uncore *uncore)
{
	/* Mask everything first so no new events are raised. */
	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IMR);

	intel_uncore_write16(uncore, GEN2_IER, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

244 245 246
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	/* Stale bits mean the reset path missed something; warn and drain. */
	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(reg), val);
	/* Clear twice since IIR can queue up two events. */
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}
262

263
/* 16-bit gen2 counterpart of gen3_assert_iir_is_zero(). */
static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	if (val == 0)
		return;

	/* Stale bits mean the reset path missed something; warn and drain. */
	drm_WARN(&uncore->i915->drm,  1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(GEN2_IIR), val);
	/* Clear twice since IIR can queue up two events. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

279 280 281 282
/*
 * gen3_irq_init - arm a gen3+ style IMR/IER/IIR register triplet.
 *
 * Checks IIR is clean (it should have been drained at reset time), then
 * enables delivery before unmasking; the posting read flushes the unmask.
 */
void gen3_irq_init(struct intel_uncore *uncore,
		   i915_reg_t imr, u32 imr_val,
		   i915_reg_t ier, u32 ier_val,
		   i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}

291 292
/* 16-bit gen2 counterpart of gen3_irq_init(). */
void gen2_irq_init(struct intel_uncore *uncore,
		   u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}

301 302 303
/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     u32 mask,
				     u32 bits)
{
	u32 val;

	/* Caller must hold irq_lock; see i915_hotplug_interrupt_update(). */
	lockdep_assert_held(&dev_priv->irq_lock);
	/* Only bits covered by @mask may be set in @bits. */
	drm_WARN_ON(&dev_priv->drm, bits & ~mask);

	/* Read-modify-write PORT_HOTPLUG_EN under the lock. */
	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid that read-modify-write cycles
 * interfere, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

339 340 341 342 343 344
/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Bits set in DEIMR mask (disable) the interrupt, so enabled bits end up
 * cleared in the register. Only writes the hardware when the cached mask
 * actually changes.
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    u32 interrupt_mask,
			    u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);
	/* Enabled bits must be a subset of the bits being updated. */
	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	/* Clear updated bits, then re-set those that stay masked. */
	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask &&
	    !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

366
/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Read-modify-write of GEN8_DE_PORT_IMR; there is no cached copy, so the
 * current value is read back from hardware. Set IMR bits mask (disable)
 * the interrupt, hence enabled bits end up cleared.
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;
	u32 old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Enabled bits must be a subset of the bits being updated. */
	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	/* Only touch the hardware when the mask actually changes. */
	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

398 399 400 401 402 403 404 405 406
/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Like bdw_update_port_irq(), but per pipe and against the cached
 * de_irq_mask[] copy instead of a register read-back.
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 u32 interrupt_mask,
			 u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Enabled bits must be a subset of the bits being updated. */
	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	/* Only touch the hardware when the cached mask actually changes. */
	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

430 431 432 433 434 435
/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Read-modify-write of the south display (PCH) interrupt mask register.
 * Set SDEIMR bits mask (disable) the interrupt, hence enabled bits end
 * up cleared.
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  u32 interrupt_mask,
				  u32 enabled_irq_mask)
{
	u32 sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	/* Enabled bits must be a subset of the bits being updated. */
	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
454

455 456
/*
 * i915_pipestat_enable_mask - derive the PIPESTAT enable bits for a pipe.
 *
 * PIPESTAT packs status bits in the low 16 bits and the corresponding
 * enable bits in the high 16 bits, so the baseline enable mask is simply
 * status_mask << 16. On gen5+ (VLV/CHV) a few bits don't follow that 1:1
 * layout and are fixed up below.
 */
u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (INTEL_GEN(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	/* These enable bits don't sit at status_bit << 16; map them by hand. */
	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	drm_WARN_ONCE(&dev_priv->drm,
		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}

499 500
/*
 * i915_enable_pipestat - enable the given PIPESTAT status interrupt bits.
 *
 * Updates the cached per-pipe mask and writes the combined enable|status
 * value; writing the status bits also clears any already-pending events.
 * No-op if all requested bits are already enabled.
 */
void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

522 523
/*
 * i915_disable_pipestat - disable the given PIPESTAT status interrupt bits.
 *
 * Mirror of i915_enable_pipestat(): clears the bits from the cached mask
 * and rewrites the register. No-op if none of the bits are enabled.
 */
void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	/* Writing the status bits also acks any pending events. */
	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

545 546 547 548 549 550 551 552
/*
 * ASLE events are only usable when the OpRegion provides an ASLE mailbox
 * and the platform is Pineview or a mobile part.
 */
static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
	return dev_priv->opregion.asle &&
		(IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv));
}

553
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!i915_has_asle(dev_priv))
		return;

	/* i915_enable_pipestat() requires irq_lock to be held. */
	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	/* gen4+ additionally enables the legacy BLC event on pipe A. */
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621
/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

622 623 624
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 i915_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	unsigned long irqflags;

	/*
	 * On i965gm TV output the frame counter only works up to
	 * the point when we enable the TV encoder. After that the
	 * frame counter ceases to work and reads zero. We need a
	 * vblank wait before enabling the TV encoder and so we
	 * have to enable vblank interrupts while the frame counter
	 * is still in a working state. However the core vblank code
	 * does not like us returning non-zero frame counter values
	 * when we've told it that we don't have a working frame
	 * counter. Thus we must stop non-zero values leaking out.
	 */
	if (!vblank->max_vblank_count)
		return 0;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	/* Interlaced modes run fields at half the vertical line count. */
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/* uncore.lock serializes the timing-critical raw register reads. */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = intel_de_read_fw(dev_priv, low_frame);
		high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

691
u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
692
{
693
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
694
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
695
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
696

697 698 699
	if (!vblank->max_vblank_count)
		return 0;

700
	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
701 702
}

703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734
/*
 * On certain encoders on certain platforms, pipe
 * scanline register will not work to get the scanline,
 * since the timings are driven from the PORT or issues
 * with scanline register updates.
 * This function will use Framestamp and current
 * timestamp registers to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads. We make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);

		scan_post_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	/* Elapsed time since vblank start, converted to lines via the dotclock. */
	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
					clock), 1000 * htotal);
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}

756 757 758 759
/*
 * intel_de_read_fw(), only for fast reads of display block, no need for
 * forcewake etc.
 */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	/* A disabled pipe has no meaningful scanline. */
	if (!crtc->active)
		return -1;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	/* Some encoders need the timestamp-based fallback; see that helper. */
	if (crtc->mode_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	/* Gen2 uses a narrower scanline field in PIPEDSL. */
	if (IS_GEN(dev_priv, 2))
		position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank.  So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squak at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem.  We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

819 820 821 822 823
/*
 * i915_get_crtc_scanoutpos - query the current scanout position of a pipe.
 *
 * Fills *vpos/*hpos with the position relative to vblank start (negative
 * inside vblank), and optionally brackets the query with system timestamps
 * in *stime/*etime. Returns false if the pipe has no valid mode clock.
 * Used as the position callback for the DRM vblank timestamp helper.
 */
static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
				     bool in_vblank_irq,
				     int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_device *dev = _crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	enum pipe pipe = crtc->pipe;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;
	/* Platforms with a usable scanline register vs. pixel counter. */
	bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
		IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
		crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;

	if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
		drm_dbg(&dev_priv->drm,
			"trying to get scanoutpos for disabled "
			"pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (use_scanline_counter) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (use_scanline_counter) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}

939 940 941 942 943
bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
				     ktime_t *vblank_time, bool in_vblank_irq)
{
	return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
		crtc, max_error, vblank_time, in_vblank_irq,
944
		i915_get_crtc_scanoutpos);
945 946
}

947 948
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
949
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
950 951 952 953 954 955 956 957 958 959
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

960
/**
961
 * ivb_parity_work - Workqueue called when a parity error interrupt
962 963 964 965 966 967 968
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
969
static void ivb_parity_work(struct work_struct *work)
970
{
971
	struct drm_i915_private *dev_priv =
972
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
973
	struct intel_gt *gt = &dev_priv->gt;
974
	u32 error_status, row, bank, subbank;
975
	char *parity_event[6];
976 977
	u32 misccpctl;
	u8 slice = 0;
978 979 980 981 982

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
983
	mutex_lock(&dev_priv->drm.struct_mutex);
984

985
	/* If we've screwed up tracking, just let the interrupt fire again */
986
	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
987 988
		goto out;

989 990 991 992
	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

993
	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
994
		i915_reg_t reg;
995

996
		slice--;
997 998
		if (drm_WARN_ON_ONCE(&dev_priv->drm,
				     slice >= NUM_L3_SLICES(dev_priv)))
999
			break;
1000

1001
		dev_priv->l3_parity.which_slice &= ~(1<<slice);
1002

1003
		reg = GEN7_L3CDERRST1(slice);
1004

1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019
		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

1020
		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
1021
				   KOBJ_CHANGE, parity_event);
1022

1023 1024
		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);
1025

1026 1027 1028 1029 1030
		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}
1031

1032
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1033

1034
out:
1035
	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
1036 1037 1038
	spin_lock_irq(&gt->irq_lock);
	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&gt->irq_lock);
1039

1040
	mutex_unlock(&dev_priv->drm.struct_mutex);
1041 1042
}

1043
static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1044
{
1045
	switch (pin) {
1046
	case HPD_PORT_TC1:
1047
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC1);
1048
	case HPD_PORT_TC2:
1049
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC2);
1050
	case HPD_PORT_TC3:
1051
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC3);
1052
	case HPD_PORT_TC4:
1053
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC4);
1054
	case HPD_PORT_TC5:
1055
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC5);
1056
	case HPD_PORT_TC6:
1057
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC6);
1058 1059 1060 1061 1062
	default:
		return false;
	}
}

1063
static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1064
{
1065 1066
	switch (pin) {
	case HPD_PORT_A:
1067
		return val & PORTA_HOTPLUG_LONG_DETECT;
1068
	case HPD_PORT_B:
1069
		return val & PORTB_HOTPLUG_LONG_DETECT;
1070
	case HPD_PORT_C:
1071 1072 1073 1074 1075 1076
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

1077
static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1078
{
1079 1080
	switch (pin) {
	case HPD_PORT_A:
1081
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_A);
1082
	case HPD_PORT_B:
1083
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_B);
1084
	case HPD_PORT_C:
1085
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_C);
1086
	case HPD_PORT_D:
1087
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_D);
1088 1089 1090 1091 1092
	default:
		return false;
	}
}

1093
static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1094
{
1095
	switch (pin) {
1096
	case HPD_PORT_TC1:
1097
		return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC1);
1098
	case HPD_PORT_TC2:
1099
		return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC2);
1100
	case HPD_PORT_TC3:
1101
		return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC3);
1102
	case HPD_PORT_TC4:
1103
		return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC4);
1104
	case HPD_PORT_TC5:
1105
		return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC5);
1106
	case HPD_PORT_TC6:
1107
		return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC6);
1108 1109 1110 1111 1112
	default:
		return false;
	}
}

1113
static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
1114
{
1115 1116
	switch (pin) {
	case HPD_PORT_E:
1117 1118 1119 1120 1121 1122
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

1123
static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1124
{
1125 1126
	switch (pin) {
	case HPD_PORT_A:
1127
		return val & PORTA_HOTPLUG_LONG_DETECT;
1128
	case HPD_PORT_B:
1129
		return val & PORTB_HOTPLUG_LONG_DETECT;
1130
	case HPD_PORT_C:
1131
		return val & PORTC_HOTPLUG_LONG_DETECT;
1132
	case HPD_PORT_D:
1133 1134 1135 1136 1137 1138
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

1139
static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1140
{
1141 1142
	switch (pin) {
	case HPD_PORT_A:
1143 1144 1145 1146 1147 1148
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

1149
static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1150
{
1151 1152
	switch (pin) {
	case HPD_PORT_B:
1153
		return val & PORTB_HOTPLUG_LONG_DETECT;
1154
	case HPD_PORT_C:
1155
		return val & PORTC_HOTPLUG_LONG_DETECT;
1156
	case HPD_PORT_D:
1157 1158 1159
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
1160 1161 1162
	}
}

1163
static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1164
{
1165 1166
	switch (pin) {
	case HPD_PORT_B:
1167
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1168
	case HPD_PORT_C:
1169
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1170
	case HPD_PORT_D:
1171 1172 1173
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
1174 1175 1176
	}
}

1177 1178 1179 1180 1181 1182 1183
/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
1184 1185 1186 1187
static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
			       u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
1188
			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
1189
{
1190
	enum hpd_pin pin;
1191

1192 1193
	BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);

1194 1195
	for_each_hpd_pin(pin) {
		if ((hpd[pin] & hotplug_trigger) == 0)
1196
			continue;
1197

1198
		*pin_mask |= BIT(pin);
1199

1200
		if (long_pulse_detect(pin, dig_hotplug_reg))
1201
			*long_mask |= BIT(pin);
1202 1203
	}

1204 1205 1206
	drm_dbg(&dev_priv->drm,
		"hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
		hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
1207 1208 1209

}

1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234
static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}

/* Collect the HPD irq bits for all encoders, regardless of enable state. */
static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 hotplug_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		hotplug_irqs |= hpd[encoder->hpd_pin];

	return hotplug_irqs;
}

1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246
static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
				     hotplug_enables_func hotplug_enables)
{
	struct intel_encoder *encoder;
	u32 hotplug = 0;

	for_each_intel_encoder(&i915->drm, encoder)
		hotplug |= hotplug_enables(i915, encoder->hpd_pin);

	return hotplug;
}

1247
static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
1248
{
1249
	wake_up_all(&dev_priv->gmbus_wait_queue);
1250 1251
}

1252
static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
1253
{
1254
	wake_up_all(&dev_priv->gmbus_wait_queue);
1255 1256
}

1257
#if defined(CONFIG_DEBUG_FS)
1258 1259
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
1260 1261 1262
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
1263
{
T
Tomeu Vizoso 已提交
1264
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
1265
	struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
1266 1267 1268
	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

	trace_intel_pipe_crc(crtc, crcs);
1269

1270
	spin_lock(&pipe_crc->lock);
1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
T
Tomeu Vizoso 已提交
1282
		spin_unlock(&pipe_crc->lock);
1283
		return;
T
Tomeu Vizoso 已提交
1284
	}
1285 1286 1287 1288 1289
	spin_unlock(&pipe_crc->lock);

	drm_crtc_add_crc_entry(&crtc->base, true,
				drm_crtc_accurate_vblank_count(&crtc->base),
				crcs);
1290
}
1291 1292
#else
static inline void
1293 1294
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
1295 1296 1297
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
1298 1299
#endif

1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316
static void flip_done_handler(struct drm_i915_private *i915,
			      enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(i915, pipe);
	struct drm_crtc_state *crtc_state = crtc->base.state;
	struct drm_pending_vblank_event *e = crtc_state->event;
	struct drm_device *dev = &i915->drm;
	unsigned long irqflags;

	spin_lock_irqsave(&dev->event_lock, irqflags);

	crtc_state->event = NULL;

	drm_crtc_send_vblank_event(&crtc->base, e);

	spin_unlock_irqrestore(&dev->event_lock, irqflags);
}
1317

1318 1319
static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
D
Daniel Vetter 已提交
1320
{
1321
	display_pipe_crc_irq_handler(dev_priv, pipe,
1322 1323
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
D
Daniel Vetter 已提交
1324 1325
}

1326 1327
static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
1328
{
1329
	display_pipe_crc_irq_handler(dev_priv, pipe,
1330 1331 1332 1333 1334
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1335
}
1336

1337 1338
static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
1339
{
1340
	u32 res1, res2;
1341

1342
	if (INTEL_GEN(dev_priv) >= 3)
1343 1344 1345 1346
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

1347
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1348 1349 1350
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;
1351

1352
	display_pipe_crc_irq_handler(dev_priv, pipe,
1353 1354 1355 1356
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
1357
}
1358

1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371
static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(PIPESTAT(pipe),
			   PIPESTAT_INT_STATUS_MASK |
			   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}

1372 1373
static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1374
{
1375
	enum pipe pipe;
1376

1377
	spin_lock(&dev_priv->irq_lock);
1378 1379 1380 1381 1382 1383

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

1384
	for_each_pipe(dev_priv, pipe) {
1385
		i915_reg_t reg;
1386
		u32 status_mask, enable_mask, iir_bit = 0;
1387

1388 1389 1390 1391 1392 1393 1394
		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */
1395 1396

		/* fifo underruns are filterered in the underrun handler. */
1397
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;
1398 1399

		switch (pipe) {
1400
		default:
1401 1402 1403 1404 1405 1406
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
1407 1408 1409
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
1410 1411
		}
		if (iir & iir_bit)
1412
			status_mask |= dev_priv->pipestat_irq_mask[pipe];
1413

1414
		if (!status_mask)
1415 1416 1417
			continue;

		reg = PIPESTAT(pipe);
1418 1419
		pipe_stats[pipe] = I915_READ(reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
1420 1421 1422

		/*
		 * Clear the PIPE*STAT regs before the IIR
1423 1424 1425 1426 1427 1428
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
1429
		 */
1430 1431 1432 1433
		if (pipe_stats[pipe]) {
			I915_WRITE(reg, pipe_stats[pipe]);
			I915_WRITE(reg, enable_mask);
		}
1434
	}
1435
	spin_unlock(&dev_priv->irq_lock);
1436 1437
}

1438 1439 1440 1441 1442 1443 1444
static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1445
			intel_handle_vblank(dev_priv, pipe);
1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}
}

/* Handle acked PIPESTAT bits for gen3/4: adds backlight (ASLE) events. */
static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);
}

/*
 * Handle acked PIPESTAT bits for i965/g4x: start-of-vblank status,
 * ASLE, CRC, underruns, plus GMBUS (latched in pipe A's PIPESTAT).
 */
static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

1506
static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1507 1508 1509
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;
1510

1511
	for_each_pipe(dev_priv, pipe) {
1512
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1513
			intel_handle_vblank(dev_priv, pipe);
1514 1515

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1516
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1517

1518 1519
		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1520 1521 1522
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1523
		gmbus_irq_handler(dev_priv);
1524 1525
}

1526
static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
1527
{
1528 1529 1530 1531 1532 1533 1534 1535 1536
	u32 hotplug_status = 0, hotplug_status_mask;
	int i;

	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
	else
		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
1537

1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553
	/*
	 * We absolutely have to clear all the pending interrupt
	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
	 * interrupt bit won't have an edge, and the i965/g4x
	 * edge triggered IIR will not notice that an interrupt
	 * is still pending. We can't use PORT_HOTPLUG_EN to
	 * guarantee the edge as the act of toggling the enable
	 * bits can itself generate a new hotplug interrupt :(
	 */
	for (i = 0; i < 10; i++) {
		u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;

		if (tmp == 0)
			return hotplug_status;

		hotplug_status |= tmp;
1554
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1555 1556
	}

1557 1558 1559
	drm_WARN_ONCE(&dev_priv->drm, 1,
		      "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
		      I915_READ(PORT_HOTPLUG_STAT));
1560

1561 1562 1563
	return hotplug_status;
}

1564
static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1565 1566 1567
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;
1568
	u32 hotplug_trigger;
1569

1570 1571 1572 1573 1574
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
	else
		hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1575

1576 1577 1578 1579 1580
	if (hotplug_trigger) {
		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug_trigger, hotplug_trigger,
				   dev_priv->hotplug.hpd,
				   i9xx_port_hotplug_long_detect);
1581

1582
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1583
	}
1584 1585 1586 1587 1588

	if ((IS_G4X(dev_priv) ||
	     IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
		dp_aux_irq_handler(dev_priv);
1589 1590
}

1591
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
J
Jesse Barnes 已提交
1592
{
1593
	struct drm_i915_private *dev_priv = arg;
J
Jesse Barnes 已提交
1594 1595
	irqreturn_t ret = IRQ_NONE;

1596 1597 1598
	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

1599
	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
1600
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1601

1602
	do {
1603
		u32 iir, gt_iir, pm_iir;
1604
		u32 pipe_stats[I915_MAX_PIPES] = {};
1605
		u32 hotplug_status = 0;
1606
		u32 ier = 0;
1607

J
Jesse Barnes 已提交
1608 1609
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);
1610
		iir = I915_READ(VLV_IIR);
J
Jesse Barnes 已提交
1611 1612

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1613
			break;
J
Jesse Barnes 已提交
1614 1615 1616

		ret = IRQ_HANDLED;

1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629
		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
1630
		I915_WRITE(VLV_MASTER_IER, 0);
1631 1632
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);
1633 1634 1635 1636 1637 1638

		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

1639
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
1640
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1641

1642 1643
		/* Call regardless, as some status bits might not be
		 * signalled in iir */
1644
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1645

1646 1647 1648 1649
		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

1650 1651 1652 1653 1654 1655
		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);
1656

1657
		I915_WRITE(VLV_IER, ier);
1658
		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1659

1660
		if (gt_iir)
1661
			gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
1662
		if (pm_iir)
1663
			gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
1664

1665
		if (hotplug_status)
1666
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1667

1668
		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1669
	} while (0);
J
Jesse Barnes 已提交
1670

1671
	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1672

J
Jesse Barnes 已提交
1673 1674 1675
	return ret;
}

1676 1677
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
1678
	struct drm_i915_private *dev_priv = arg;
1679 1680
	irqreturn_t ret = IRQ_NONE;

1681 1682 1683
	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

1684
	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
1685
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1686

1687
	do {
1688
		u32 master_ctl, iir;
1689
		u32 pipe_stats[I915_MAX_PIPES] = {};
1690
		u32 hotplug_status = 0;
1691 1692
		u32 ier = 0;

1693 1694
		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);
1695

1696 1697
		if (master_ctl == 0 && iir == 0)
			break;
1698

1699 1700
		ret = IRQ_HANDLED;

1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713
		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
1714
		I915_WRITE(GEN8_MASTER_IRQ, 0);
1715 1716
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);
1717

1718
		gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
1719

1720
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
1721
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1722

1723 1724
		/* Call regardless, as some status bits might not be
		 * signalled in iir */
1725
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1726

1727 1728 1729 1730 1731
		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

1732 1733 1734 1735 1736 1737 1738
		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

1739
		I915_WRITE(VLV_IER, ier);
1740
		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1741 1742

		if (hotplug_status)
1743
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1744

1745
		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1746
	} while (0);
1747

1748
	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1749

1750 1751 1752
	return ret;
}

1753
static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1754
				u32 hotplug_trigger)
1755 1756 1757
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

1758 1759 1760 1761 1762 1763
	/*
	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
	 * unless we touch the hotplug register, even if hotplug_trigger is
	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
	 * errors.
	 */
1764
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1765 1766 1767 1768 1769 1770 1771 1772
	if (!hotplug_trigger) {
		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
			PORTD_HOTPLUG_STATUS_MASK |
			PORTC_HOTPLUG_STATUS_MASK |
			PORTB_HOTPLUG_STATUS_MASK;
		dig_hotplug_reg &= ~mask;
	}

1773
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1774 1775
	if (!hotplug_trigger)
		return;
1776

1777 1778 1779
	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
			   hotplug_trigger, dig_hotplug_reg,
			   dev_priv->hotplug.pch_hpd,
1780 1781
			   pch_port_hotplug_long_detect);

1782
	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1783 1784
}

1785
static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1786
{
1787
	enum pipe pipe;
1788
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1789

1790
	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1791

1792 1793 1794
	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
1795 1796
		drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n",
			port_name(port));
1797
	}
1798

1799
	if (pch_iir & SDE_AUX_MASK)
1800
		dp_aux_irq_handler(dev_priv);
1801

1802
	if (pch_iir & SDE_GMBUS)
1803
		gmbus_irq_handler(dev_priv);
1804 1805

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
1806
		drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
1807 1808

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
1809
		drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
1810 1811

	if (pch_iir & SDE_POISON)
1812
		drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1813

1814
	if (pch_iir & SDE_FDI_MASK) {
1815
		for_each_pipe(dev_priv, pipe)
1816 1817 1818
			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
				pipe_name(pipe),
				I915_READ(FDI_RX_IIR(pipe)));
1819
	}
1820 1821

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1822
		drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
1823 1824

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1825 1826
		drm_dbg(&dev_priv->drm,
			"PCH transcoder CRC error interrupt\n");
1827 1828

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1829
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
1830 1831

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1832
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
1833 1834
}

1835
static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
1836 1837
{
	u32 err_int = I915_READ(GEN7_ERR_INT);
D
Daniel Vetter 已提交
1838
	enum pipe pipe;
1839

1840
	if (err_int & ERR_INT_POISON)
1841
		drm_err(&dev_priv->drm, "Poison interrupt\n");
1842

1843
	for_each_pipe(dev_priv, pipe) {
1844 1845
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1846

D
Daniel Vetter 已提交
1847
		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1848 1849
			if (IS_IVYBRIDGE(dev_priv))
				ivb_pipe_crc_irq_handler(dev_priv, pipe);
D
Daniel Vetter 已提交
1850
			else
1851
				hsw_pipe_crc_irq_handler(dev_priv, pipe);
D
Daniel Vetter 已提交
1852 1853
		}
	}
1854

1855 1856 1857
	I915_WRITE(GEN7_ERR_INT, err_int);
}

1858
static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
1859 1860
{
	u32 serr_int = I915_READ(SERR_INT);
1861
	enum pipe pipe;
1862

1863
	if (serr_int & SERR_INT_POISON)
1864
		drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1865

1866 1867 1868
	for_each_pipe(dev_priv, pipe)
		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
1869 1870

	I915_WRITE(SERR_INT, serr_int);
1871 1872
}

1873
static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1874
{
1875
	enum pipe pipe;
1876
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1877

1878
	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1879

1880 1881 1882
	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
1883 1884
		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
			port_name(port));
1885
	}
1886 1887

	if (pch_iir & SDE_AUX_MASK_CPT)
1888
		dp_aux_irq_handler(dev_priv);
1889 1890

	if (pch_iir & SDE_GMBUS_CPT)
1891
		gmbus_irq_handler(dev_priv);
1892 1893

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1894
		drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");
1895 1896

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1897
		drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
1898

1899
	if (pch_iir & SDE_FDI_MASK_CPT) {
1900
		for_each_pipe(dev_priv, pipe)
1901 1902 1903
			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
				pipe_name(pipe),
				I915_READ(FDI_RX_IIR(pipe)));
1904
	}
1905 1906

	if (pch_iir & SDE_ERROR_CPT)
1907
		cpt_serr_int_handler(dev_priv);
1908 1909
}

1910
/*
 * ICP+ PCH interrupt handler: decode DDI and Type-C hotplug triggers
 * (latching and acking the SHOTPLUG_CTL_* status first), then GMBUS.
 */
static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP;
	u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP;
	u32 pin_mask = 0, long_mask = 0;

	if (ddi_hotplug_trigger) {
		u32 dig_hotplug_reg;

		/* read the latched status and write it back to ack it */
		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
		I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   ddi_hotplug_trigger, dig_hotplug_reg,
				   dev_priv->hotplug.pch_hpd,
				   icp_ddi_port_hotplug_long_detect);
	}

	if (tc_hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
		I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   tc_hotplug_trigger, dig_hotplug_reg,
				   dev_priv->hotplug.pch_hpd,
				   icp_tc_port_hotplug_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_ICP)
		gmbus_irq_handler(dev_priv);
}

1947
/*
 * SPT/KBP PCH interrupt handler: port E hotplug lives in a second
 * register (PCH_PORT_HOTPLUG2), so the two trigger groups are decoded
 * separately before a single dispatch; GMBUS is handled last.
 */
static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
	u32 pin_mask = 0, long_mask = 0;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg;

		/* read the latched status and write it back to ack it */
		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug_trigger, dig_hotplug_reg,
				   dev_priv->hotplug.pch_hpd,
				   spt_port_hotplug_long_detect);
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug2_trigger, dig_hotplug_reg,
				   dev_priv->hotplug.pch_hpd,
				   spt_port_hotplug2_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);
}

1985
/*
 * ILK-style CPU-side digital port hotplug: latch and ack
 * DIGITAL_PORT_HOTPLUG_CNTRL, decode which pins fired (and whether the
 * pulse was long), then dispatch to the generic HPD handler.
 */
static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger)
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
			   hotplug_trigger, dig_hotplug_reg,
			   dev_priv->hotplug.hpd,
			   ilk_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

2001 2002
/*
 * Ironlake/Sandybridge display engine interrupt handler for an
 * already-read and already-acked DEIIR value: HPD, AUX, opregion,
 * poison, per-pipe vblank/underrun/CRC, chained PCH events and
 * (gen5 only) PCU/RPS events.
 */
static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev_priv);

	if (de_iir & DE_POISON)
		drm_err(&dev_priv->drm, "Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			intel_handle_vblank(dev_priv, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev_priv))
			cpt_irq_handler(dev_priv, pch_iir);
		else
			ibx_irq_handler(dev_priv, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
		gen5_rps_irq_handler(&dev_priv->gt.rps);
}

2047 2048
/*
 * Ivybridge/Haswell display engine interrupt handler (DEIIR already read
 * and acked by the caller): HPD, error ints, PSR, AUX, opregion, vblank
 * and chained PCH events.
 */
static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev_priv);

	if (de_iir & DE_EDP_PSR_INT_HSW) {
		u32 psr_iir = I915_READ(EDP_PSR_IIR);

		intel_psr_irq_handler(dev_priv, psr_iir);
		I915_WRITE(EDP_PSR_IIR, psr_iir);
	}

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev_priv);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
			intel_handle_vblank(dev_priv, pipe);
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev_priv, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

2088 2089 2090 2091 2092 2093 2094 2095
/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *i915 = arg;
	void __iomem * const regs = i915->uncore.regs;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (unlikely(!intel_irqs_enabled(i915)))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	/* disable master interrupt before clearing iir  */
	de_ier = raw_reg_read(regs, DEIER);
	raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(i915)) {
		sde_ier = raw_reg_read(regs, SDEIER);
		raw_reg_write(regs, SDEIER, 0);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = raw_reg_read(regs, GTIIR);
	if (gt_iir) {
		raw_reg_write(regs, GTIIR, gt_iir);
		if (INTEL_GEN(i915) >= 6)
			gen6_gt_irq_handler(&i915->gt, gt_iir);
		else
			gen5_gt_irq_handler(&i915->gt, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = raw_reg_read(regs, DEIIR);
	if (de_iir) {
		raw_reg_write(regs, DEIIR, de_iir);
		if (INTEL_GEN(i915) >= 7)
			ivb_display_irq_handler(i915, de_iir);
		else
			ilk_display_irq_handler(i915, de_iir);
		ret = IRQ_HANDLED;
	}

	if (INTEL_GEN(i915) >= 6) {
		u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
		if (pm_iir) {
			raw_reg_write(regs, GEN6_PMIIR, pm_iir);
			gen6_rps_irq_handler(&i915->gt.rps, pm_iir);
			ret = IRQ_HANDLED;
		}
	}

	/* restore master and (if saved) south interrupt enables */
	raw_reg_write(regs, DEIER, de_ier);
	if (sde_ier)
		raw_reg_write(regs, SDEIER, sde_ier);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	enable_rpm_wakeref_asserts(&i915->runtime_pm);

	return ret;
}

2164
/*
 * Broxton/Geminilake hotplug: latch and ack PCH_PORT_HOTPLUG, decode
 * the triggered pins, then dispatch to the generic HPD handler.
 */
static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger)
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
			   hotplug_trigger, dig_hotplug_reg,
			   dev_priv->hotplug.hpd,
			   bxt_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

2180 2181 2182
/*
 * Gen11+ DE HPD interrupt handler: decode Type-C and Thunderbolt hotplug
 * triggers separately (each with its own latched control register),
 * then dispatch all detected pins at once.
 */
static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
	u32 pin_mask = 0, long_mask = 0;
	u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
	u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;

	if (trigger_tc) {
		u32 dig_hotplug_reg;

		/* read the latched status and write it back to ack it */
		dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
		I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   trigger_tc, dig_hotplug_reg,
				   dev_priv->hotplug.hpd,
				   gen11_port_hotplug_long_detect);
	}

	if (trigger_tbt) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
		I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   trigger_tbt, dig_hotplug_reg,
				   dev_priv->hotplug.hpd,
				   gen11_port_hotplug_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
	else
		drm_err(&dev_priv->drm,
			"Unexpected DE HPD interrupt 0x%08x\n", iir);
}

2217 2218
/*
 * Return the platform-specific set of DE port IIR bits that indicate an
 * AUX channel interrupt.
 */
static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
{
	u32 mask;

	if (INTEL_GEN(dev_priv) >= 12)
		return TGL_DE_PORT_AUX_DDIA |
			TGL_DE_PORT_AUX_DDIB |
			TGL_DE_PORT_AUX_DDIC |
			TGL_DE_PORT_AUX_USBC1 |
			TGL_DE_PORT_AUX_USBC2 |
			TGL_DE_PORT_AUX_USBC3 |
			TGL_DE_PORT_AUX_USBC4 |
			TGL_DE_PORT_AUX_USBC5 |
			TGL_DE_PORT_AUX_USBC6;

	/* gen8..11: start from channel A and add what the platform has */
	mask = GEN8_AUX_CHANNEL_A;
	if (INTEL_GEN(dev_priv) >= 9)
		mask |= GEN9_AUX_CHANNEL_B |
			GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;

	if (IS_CNL_WITH_PORT_F(dev_priv) || IS_GEN(dev_priv, 11))
		mask |= CNL_AUX_CHANNEL_F;

	if (IS_GEN(dev_priv, 11))
		mask |= ICL_AUX_CHANNEL_E;

	return mask;
}

2248 2249
/*
 * Return the platform-specific set of DE pipe IIR bits that are treated
 * as fault errors (most specific platform checked first).
 */
static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
{
	if (IS_ROCKETLAKE(dev_priv))
		return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
	else if (INTEL_GEN(dev_priv) >= 11)
		return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
	else if (INTEL_GEN(dev_priv) >= 9)
		return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
	else
		return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
}

2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270
/*
 * Gen8+ DE misc interrupt handler: opregion ASLE and PSR events; logs
 * an error if the IIR had no bit we recognise.
 */
static void
gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
	bool found = false;

	if (iir & GEN8_DE_MISC_GSE) {
		intel_opregion_asle_intr(dev_priv);
		found = true;
	}

	if (iir & GEN8_DE_EDP_PSR) {
		u32 psr_iir;
		i915_reg_t iir_reg;

		/* gen12+ moved the PSR IIR to a per-transcoder register */
		if (INTEL_GEN(dev_priv) >= 12)
			iir_reg = TRANS_PSR_IIR(dev_priv->psr.transcoder);
		else
			iir_reg = EDP_PSR_IIR;

		psr_iir = I915_READ(iir_reg);
		I915_WRITE(iir_reg, psr_iir);

		if (psr_iir)
			found = true;

		intel_psr_irq_handler(dev_priv, psr_iir);
	}

	if (!found)
		drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
}

2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348
/*
 * Gen11 DSI tearing-effect (TE) interrupt: figure out which DSI
 * transcoder/pipe the TE belongs to, deliver a vblank event for that
 * pipe, and ack the TE bit in the DSI interrupt identity register.
 */
static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
					   u32 te_trigger)
{
	enum pipe pipe = INVALID_PIPE;
	enum transcoder dsi_trans;
	enum port port;
	u32 val, tmp;

	/*
	 * Incase of dual link, TE comes from DSI_1
	 * this is to check if dual link is enabled
	 */
	val = I915_READ(TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
	val &= PORT_SYNC_MODE_ENABLE;

	/*
	 * if dual link is enabled, then read DSI_0
	 * transcoder registers
	 */
	port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
						  PORT_A : PORT_B;
	dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;

	/* Check if DSI configured in command mode */
	val = I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans));
	val = val & OP_MODE_MASK;

	if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
		drm_err(&dev_priv->drm, "DSI trancoder not configured in command mode\n");
		return;
	}

	/* Get PIPE for handling VBLANK event */
	val = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
	switch (val & TRANS_DDI_EDP_INPUT_MASK) {
	case TRANS_DDI_EDP_INPUT_A_ON:
		pipe = PIPE_A;
		break;
	case TRANS_DDI_EDP_INPUT_B_ONOFF:
		pipe = PIPE_B;
		break;
	case TRANS_DDI_EDP_INPUT_C_ONOFF:
		pipe = PIPE_C;
		break;
	default:
		drm_err(&dev_priv->drm, "Invalid PIPE\n");
		return;
	}

	intel_handle_vblank(dev_priv, pipe);

	/* clear TE in dsi IIR */
	port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
	tmp = I915_READ(DSI_INTR_IDENT_REG(port));
	I915_WRITE(DSI_INTR_IDENT_REG(port), tmp);
}

2349 2350
/*
 * Gen8+ display engine interrupt handler. For each category flagged in
 * @master_ctl (misc, gen11+ HPD, port, per-pipe, PCH) the corresponding
 * IIR is read, acked by writing the value back, and then processed.
 * Returns IRQ_HANDLED if any IIR had bits set.
 */
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;
	u32 iir;
	enum pipe pipe;

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = I915_READ(GEN8_DE_MISC_IIR);
		if (iir) {
			I915_WRITE(GEN8_DE_MISC_IIR, iir);
			ret = IRQ_HANDLED;
			gen8_de_misc_irq_handler(dev_priv, iir);
		} else {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE MISC)!\n");
		}
	}

	if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
		iir = I915_READ(GEN11_DE_HPD_IIR);
		if (iir) {
			I915_WRITE(GEN11_DE_HPD_IIR, iir);
			ret = IRQ_HANDLED;
			gen11_hpd_irq_handler(dev_priv, iir);
		} else {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied, (DE HPD)!\n");
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = I915_READ(GEN8_DE_PORT_IIR);
		if (iir) {
			bool found = false;

			I915_WRITE(GEN8_DE_PORT_IIR, iir);
			ret = IRQ_HANDLED;

			if (iir & gen8_de_port_aux_mask(dev_priv)) {
				dp_aux_irq_handler(dev_priv);
				found = true;
			}

			if (IS_GEN9_LP(dev_priv)) {
				u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
					found = true;
				}
			}

			if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
				gmbus_irq_handler(dev_priv);
				found = true;
			}

			if (INTEL_GEN(dev_priv) >= 11) {
				u32 te_trigger = iir & (DSI0_TE | DSI1_TE);

				if (te_trigger) {
					gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
					found = true;
				}
			}

			if (!found)
				drm_err(&dev_priv->drm,
					"Unexpected DE Port interrupt\n");
		}
		else
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE PORT)!\n");
	}

	for_each_pipe(dev_priv, pipe) {
		u32 fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE PIPE)!\n");
			continue;
		}

		ret = IRQ_HANDLED;
		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK)
			intel_handle_vblank(dev_priv, pipe);

		if (iir & GEN9_PIPE_PLANE1_FLIP_DONE)
			flip_done_handler(dev_priv, pipe);

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev_priv, pipe);

		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
		if (fault_errors)
			drm_err(&dev_priv->drm,
				"Fault errors on pipe %c: 0x%08x\n",
				pipe_name(pipe),
				fault_errors);
	}

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		iir = I915_READ(SDEIIR);
		if (iir) {
			I915_WRITE(SDEIIR, iir);
			ret = IRQ_HANDLED;

			if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
				icp_irq_handler(dev_priv, iir);
			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
				spt_irq_handler(dev_priv, iir);
			else
				cpt_irq_handler(dev_priv, iir);
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			drm_dbg(&dev_priv->drm,
				"The master control interrupt lied (SDE)!\n");
		}
	}

	return ret;
}

2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516
/* Disable the gen8 master interrupt and return the latched indications. */
static inline u32 gen8_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN8_MASTER_IRQ);
}

/* Re-enable the gen8 master interrupt control bit. */
static inline void gen8_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
}

2517 2518
/*
 * Top-level gen8 interrupt handler: disable the master interrupt, handle
 * GT sources, then (with a wakeref assert window) display sources, and
 * re-enable the master interrupt.
 */
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	void __iomem * const regs = dev_priv->uncore.regs;
	u32 master_ctl;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	master_ctl = gen8_master_intr_disable(regs);
	if (!master_ctl) {
		gen8_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen8_gt_irq_handler(&dev_priv->gt, master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & ~GEN8_GT_IRQS) {
		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
		gen8_de_irq_handler(dev_priv, master_ctl);
		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
	}

	gen8_master_intr_enable(regs);

	return IRQ_HANDLED;
}

2547
/*
 * Ack the gen11 GU misc interrupt if flagged in @master_ctl and return
 * the IIR value read (0 when not flagged), for later processing.
 */
static u32
gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 iir;

	if (!(master_ctl & GEN11_GU_MISC_IRQ))
		return 0;

	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
	if (likely(iir))
		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);

	return iir;
}

static void
2564
gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
2565 2566
{
	if (iir & GEN11_GU_MISC_GSE)
2567
		intel_opregion_asle_intr(gt->i915);
2568 2569
}

2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587
/* Disable the gen11 master interrupt and return the latched indications. */
static inline u32 gen11_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
}

/* Re-enable the gen11 master interrupt control bit. */
static inline void gen11_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}

2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606
/*
 * Gen11+ display interrupt path: mask the display interrupt control,
 * run the gen8 DE handler on the latched value, then unmask. Wakeref
 * asserts are disabled around the MMIO accesses.
 */
static void
gen11_display_irq_handler(struct drm_i915_private *i915)
{
	void __iomem * const regs = i915->uncore.regs;
	const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);

	disable_rpm_wakeref_asserts(&i915->runtime_pm);
	/*
	 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
	 * for the display related bits.
	 */
	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
	gen8_de_irq_handler(i915, disp_ctl);
	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
		      GEN11_DISPLAY_IRQ_ENABLE);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);
}

2607 2608 2609 2610
/*
 * Shared gen11/dg1 top-level interrupt handler, parameterized on the
 * platform-specific master disable/enable operations: disable master,
 * handle GT then display then GU misc sources, re-enable master.
 */
static __always_inline irqreturn_t
__gen11_irq_handler(struct drm_i915_private * const i915,
		    u32 (*intr_disable)(void __iomem * const regs),
		    void (*intr_enable)(void __iomem * const regs))
{
	void __iomem * const regs = i915->uncore.regs;
	struct intel_gt *gt = &i915->gt;
	u32 master_ctl;
	u32 gu_misc_iir;

	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_ctl = intr_disable(regs);
	if (!master_ctl) {
		intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen11_gt_irq_handler(gt, master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & GEN11_DISPLAY_IRQ)
		gen11_display_irq_handler(i915);

	gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);

	intr_enable(regs);

	/* GU misc is processed after master re-enable; it was acked above */
	gen11_gu_misc_irq_handler(gt, gu_misc_iir);

	return IRQ_HANDLED;
}

2642 2643 2644 2645 2646 2647 2648
/* Gen11 IRQ entry point: shared handler with gen11 master intr ops. */
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
	return __gen11_irq_handler(arg,
				   gen11_master_intr_disable,
				   gen11_master_intr_enable);
}

2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688
/*
 * DG1: disable and ack the master unit interrupt, then sample and ack
 * the graphics master IIR. Returns 0 if nothing was pending at either
 * level, otherwise the graphics-level indications.
 */
static u32 dg1_master_intr_disable_and_ack(void __iomem * const regs)
{
	u32 val;

	/* First disable interrupts */
	raw_reg_write(regs, DG1_MSTR_UNIT_INTR, 0);

	/* Get the indication levels and ack the master unit */
	val = raw_reg_read(regs, DG1_MSTR_UNIT_INTR);
	if (unlikely(!val))
		return 0;

	raw_reg_write(regs, DG1_MSTR_UNIT_INTR, val);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt and ack them right away - we keep GEN11_MASTER_IRQ
	 * out as this bit doesn't exist anymore for DG1
	 */
	val = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ) & ~GEN11_MASTER_IRQ;
	if (unlikely(!val))
		return 0;

	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, val);

	return val;
}

/* Re-enable the DG1 master unit interrupt. */
static inline void dg1_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, DG1_MSTR_UNIT_INTR, DG1_MSTR_IRQ);
}

/* DG1 IRQ entry point: shared gen11 handler with DG1 master intr ops. */
static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
	return __gen11_irq_handler(arg,
				   dg1_master_intr_disable_and_ack,
				   dg1_master_intr_enable);
}

2689 2690 2691
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
2692
int i8xx_enable_vblank(struct drm_crtc *crtc)
2693
{
2694 2695
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2696
	unsigned long irqflags;
2697

2698
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2699
	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2700
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2701

2702 2703 2704
	return 0;
}

2705
/*
 * DRM vblank hook for i915G/GM: additionally disables render clock
 * gating while any vblank interrupt is enabled (see comment below),
 * then defers to the gen2 path.
 */
int i915gm_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);

	/*
	 * Vblank interrupts fail to wake the device up from C2+.
	 * Disabling render clock gating during C-states avoids
	 * the problem. There is a small power cost so we do this
	 * only when vblank interrupts are actually enabled.
	 */
	if (dev_priv->vblank_enabled++ == 0)
		I915_WRITE(SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));

	return i8xx_enable_vblank(crtc);
}

2721
int i965_enable_vblank(struct drm_crtc *crtc)
2722
{
2723 2724
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2725 2726 2727
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2728 2729
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
2730 2731 2732 2733 2734
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

2735
/*
 * DRM vblank hook for ILK..HSW: enable the gen-appropriate DE vblank
 * bit under the irq lock, and resynchronize the frame counter when PSR
 * may have stalled it.
 */
int ilk_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* Even though there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated.
	 */
	if (HAS_PSR(dev_priv))
		drm_crtc_vblank_restore(crtc);

	return 0;
}

2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786
/*
 * Enable/disable the DSI tearing-effect (TE) interrupt for a command-mode
 * DSI crtc. Returns false (doing nothing) when the crtc does not use TE;
 * returns true after unmasking/masking DSI_TE_EVENT and acking any
 * pending identity bits.
 */
static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
				   bool enable)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	enum port port;
	u32 tmp;

	if (!(intel_crtc->mode_flags &
	    (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
		return false;

	/* for dual link cases we consider TE from slave */
	if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
		port = PORT_B;
	else
		port = PORT_A;

	tmp = I915_READ(DSI_INTR_MASK_REG(port));
	if (enable)
		tmp &= ~DSI_TE_EVENT;
	else
		tmp |= DSI_TE_EVENT;

	I915_WRITE(DSI_INTR_MASK_REG(port), tmp);

	/* ack any stale TE identity bits by writing back what we read */
	tmp = I915_READ(DSI_INTR_IDENT_REG(port));
	I915_WRITE(DSI_INTR_IDENT_REG(port), tmp);

	return true;
}

2787
/*
 * DRM vblank hook for BDW+: command-mode DSI crtcs use the TE interrupt
 * instead of the pipe vblank; otherwise enable GEN8_PIPE_VBLANK under
 * the irq lock and resynchronize the frame counter when PSR may have
 * stalled it.
 */
int bdw_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	unsigned long irqflags;

	if (gen11_dsi_configure_te(intel_crtc, true))
		return 0;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* Even if there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated, so check only for PSR.
	 */
	if (HAS_PSR(dev_priv))
		drm_crtc_vblank_restore(crtc);

	return 0;
}

2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822
/* Enable the plane-1 flip-done interrupt for @crtc's pipe. */
void skl_enable_flip_done(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	bdw_enable_pipe_irq(dev_priv, crtc->pipe, GEN9_PIPE_PLANE1_FLIP_DONE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

2823 2824 2825
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
2826
void i8xx_disable_vblank(struct drm_crtc *crtc)
2827
{
2828 2829
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2830
	unsigned long irqflags;
2831

2832
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2833
	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2834 2835 2836
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

2837
/*
 * DRM vblank hook for i915G/GM: disable the vblank interrupt, then
 * re-enable render clock gating once no vblank interrupts remain
 * enabled (mirrors i915gm_enable_vblank).
 */
void i915gm_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);

	i8xx_disable_vblank(crtc);

	if (--dev_priv->vblank_enabled == 0)
		I915_WRITE(SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}

2847
void i965_disable_vblank(struct drm_crtc *crtc)
2848
{
2849 2850
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2851 2852 2853
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2854 2855
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2856 2857 2858
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

2859
/*
 * DRM vblank hook for ILK..HSW: disable the gen-appropriate DE vblank
 * bit under the irq lock.
 */
void ilk_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

2872
/*
 * DRM vblank hook for BDW+: command-mode DSI crtcs mask the TE interrupt
 * instead; otherwise disable GEN8_PIPE_VBLANK under the irq lock.
 */
void bdw_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	unsigned long irqflags;

	if (gen11_dsi_configure_te(intel_crtc, false))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899
/* Disable the plane-1 flip-done interrupt for @crtc's pipe. */
void skl_disable_flip_done(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	bdw_disable_pipe_irq(dev_priv, crtc->pipe, GEN9_PIPE_PLANE1_FLIP_DONE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

2900
/*
 * Reset the south display engine (PCH) interrupt registers; no-op on
 * platforms without a functional PCH. CPT/LPT also clear SERR_INT.
 */
static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (HAS_PCH_NOP(dev_priv))
		return;

	GEN3_IRQ_RESET(uncore, SDE);

	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		I915_WRITE(SERR_INT, 0xffffffff);
}
2912

2913 2914
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
2915 2916
	struct intel_uncore *uncore = &dev_priv->uncore;

2917
	if (IS_CHERRYVIEW(dev_priv))
2918
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
2919
	else
2920
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);
2921

2922
	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
2923
	intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2924

2925
	i9xx_pipestat_irq_reset(dev_priv);
2926

2927
	GEN3_IRQ_RESET(uncore, VLV_);
2928
	dev_priv->irq_mask = ~0u;
2929 2930
}

2931 2932
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
2933 2934
	struct intel_uncore *uncore = &dev_priv->uncore;

2935
	u32 pipestat_mask;
2936
	u32 enable_mask;
2937 2938
	enum pipe pipe;

2939
	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
2940 2941 2942 2943 2944

	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

2945 2946
	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2947 2948 2949 2950
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_LPE_PIPE_A_INTERRUPT |
		I915_LPE_PIPE_B_INTERRUPT;

2951
	if (IS_CHERRYVIEW(dev_priv))
2952 2953
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
			I915_LPE_PIPE_C_INTERRUPT;
2954

2955
	drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
2956

2957 2958
	dev_priv->irq_mask = ~enable_mask;

2959
	GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
2960 2961 2962 2963
}

/* drm_dma.h hooks
*/
2964
static void ilk_irq_reset(struct drm_i915_private *dev_priv)
2965
{
2966
	struct intel_uncore *uncore = &dev_priv->uncore;
2967

2968
	GEN3_IRQ_RESET(uncore, DE);
2969 2970
	dev_priv->irq_mask = ~0u;

2971
	if (IS_GEN(dev_priv, 7))
2972
		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
2973

2974
	if (IS_HASWELL(dev_priv)) {
2975 2976
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
2977 2978
	}

2979
	gen5_gt_irq_reset(&dev_priv->gt);
2980

2981
	ibx_irq_reset(dev_priv);
2982 2983
}

2984
static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
J
Jesse Barnes 已提交
2985
{
2986 2987 2988
	I915_WRITE(VLV_MASTER_IER, 0);
	POSTING_READ(VLV_MASTER_IER);

2989
	gen5_gt_irq_reset(&dev_priv->gt);
J
Jesse Barnes 已提交
2990

2991
	spin_lock_irq(&dev_priv->irq_lock);
2992 2993
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
2994
	spin_unlock_irq(&dev_priv->irq_lock);
J
Jesse Barnes 已提交
2995 2996
}

2997
static void gen8_irq_reset(struct drm_i915_private *dev_priv)
2998
{
2999
	struct intel_uncore *uncore = &dev_priv->uncore;
3000
	enum pipe pipe;
3001

3002
	gen8_master_intr_disable(dev_priv->uncore.regs);
3003

3004
	gen8_gt_irq_reset(&dev_priv->gt);
3005

3006 3007
	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3008

3009
	for_each_pipe(dev_priv, pipe)
3010 3011
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
3012
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3013

3014 3015 3016
	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3017

3018
	if (HAS_PCH_SPLIT(dev_priv))
3019
		ibx_irq_reset(dev_priv);
3020
}
3021

3022
static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
M
Mika Kuoppala 已提交
3023
{
3024
	struct intel_uncore *uncore = &dev_priv->uncore;
3025
	enum pipe pipe;
3026 3027
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
M
Mika Kuoppala 已提交
3028

3029
	intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
M
Mika Kuoppala 已提交
3030

3031 3032 3033
	if (INTEL_GEN(dev_priv) >= 12) {
		enum transcoder trans;

3034
		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
			intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
		}
	} else {
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}
3048

M
Mika Kuoppala 已提交
3049 3050 3051
	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
3052
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
M
Mika Kuoppala 已提交
3053

3054 3055 3056
	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
	GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
3057

3058
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3059
		GEN3_IRQ_RESET(uncore, SDE);
M
Matt Roper 已提交
3060

3061 3062
	/* Wa_14010685332:icl,jsl,ehl,tgl,rkl */
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) {
M
Matt Roper 已提交
3063 3064 3065 3066 3067
		intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
				 SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
		intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
				 SBCLK_RUN_REFCLK_DIS, 0);
	}
M
Mika Kuoppala 已提交
3068 3069
}

3070 3071 3072 3073
/* Full gen11+ interrupt reset: master, GT, display, misc and PCU. */
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	/* DG1-style parts have a separate master unit interrupt register. */
	if (HAS_MASTER_UNIT_IRQ(dev_priv))
		dg1_master_intr_disable_and_ack(dev_priv->uncore.regs);
	else
		gen11_master_intr_disable(dev_priv->uncore.regs);

	gen11_gt_irq_reset(&dev_priv->gt);
	gen11_display_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
}

3086
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3087
				     u8 pipe_mask)
3088
{
3089 3090
	struct intel_uncore *uncore = &dev_priv->uncore;

3091
	u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3092
	enum pipe pipe;
3093

3094 3095 3096
	if (INTEL_GEN(dev_priv) >= 9)
		extra_ier |= GEN9_PIPE_PLANE1_FLIP_DONE;

3097
	spin_lock_irq(&dev_priv->irq_lock);
3098 3099 3100 3101 3102 3103

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

3104
	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3105
		GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
3106 3107
				  dev_priv->de_irq_mask[pipe],
				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
3108

3109
	spin_unlock_irq(&dev_priv->irq_lock);
3110 3111
}

3112
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3113
				     u8 pipe_mask)
3114
{
3115
	struct intel_uncore *uncore = &dev_priv->uncore;
3116 3117
	enum pipe pipe;

3118
	spin_lock_irq(&dev_priv->irq_lock);
3119 3120 3121 3122 3123 3124

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

3125
	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3126
		GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3127

3128 3129 3130
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
3131
	intel_synchronize_irq(dev_priv);
3132 3133
}

3134
static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
3135
{
3136
	struct intel_uncore *uncore = &dev_priv->uncore;
3137 3138 3139 3140

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

3141
	gen8_gt_irq_reset(&dev_priv->gt);
3142

3143
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3144

3145
	spin_lock_irq(&dev_priv->irq_lock);
3146 3147
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
3148
	spin_unlock_irq(&dev_priv->irq_lock);
3149 3150
}

3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175
/*
 * Map a hotplug pin to the PCH_PORT_HOTPLUG enable (and pulse duration)
 * bits for IBX/CPT/LPT PCHs. Returns 0 for pins with no south HPD bit.
 */
static u32 ibx_hotplug_enables(struct drm_i915_private *i915,
			       enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_A:
		/*
		 * When CPU and PCH are on the same package, port A
		 * HPD must be enabled in both north and south.
		 */
		return HAS_PCH_LPT_LP(i915) ?
			PORTA_HOTPLUG_ENABLE : 0;
	case HPD_PORT_B:
		return PORTB_HOTPLUG_ENABLE |
			PORTB_PULSE_DURATION_2ms;
	case HPD_PORT_C:
		return PORTC_HOTPLUG_ENABLE |
			PORTC_PULSE_DURATION_2ms;
	case HPD_PORT_D:
		return PORTD_HOTPLUG_ENABLE |
			PORTD_PULSE_DURATION_2ms;
	default:
		return 0;
	}
}

3176
static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
3177
{
3178
	u32 hotplug;
3179 3180 3181

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
3182 3183
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on LPT+.
3184
	 */
3185
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3186 3187 3188 3189 3190
	hotplug &= ~(PORTA_HOTPLUG_ENABLE |
		     PORTB_HOTPLUG_ENABLE |
		     PORTC_HOTPLUG_ENABLE |
		     PORTD_HOTPLUG_ENABLE |
		     PORTB_PULSE_DURATION_MASK |
3191 3192
		     PORTC_PULSE_DURATION_MASK |
		     PORTD_PULSE_DURATION_MASK);
3193
	hotplug |= intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables);
3194
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3195
}
X
Xiong Zhang 已提交
3196

3197 3198 3199 3200
static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

3201
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3202
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3203 3204 3205 3206 3207 3208

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	ibx_hpd_detection_setup(dev_priv);
}

3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239
/*
 * Map a DDI hotplug pin to its SHOTPLUG_CTL_DDI enable bit on ICP+ PCHs.
 * Returns 0 for non-DDI pins.
 */
static u32 icp_ddi_hotplug_enables(struct drm_i915_private *i915,
				   enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_A:
	case HPD_PORT_B:
	case HPD_PORT_C:
	case HPD_PORT_D:
		return SHOTPLUG_CTL_DDI_HPD_ENABLE(pin);
	default:
		return 0;
	}
}

/*
 * Map a Type-C hotplug pin to its SHOTPLUG_CTL_TC enable bit on ICP+ PCHs.
 * Returns 0 for non-TC pins.
 */
static u32 icp_tc_hotplug_enables(struct drm_i915_private *i915,
				  enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
		return ICP_TC_HPD_ENABLE(pin);
	default:
		return 0;
	}
}

static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv)
3240 3241 3242 3243
{
	u32 hotplug;

	hotplug = I915_READ(SHOTPLUG_CTL_DDI);
3244 3245 3246 3247 3248
	hotplug &= ~(SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) |
		     SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) |
		     SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) |
		     SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D));
	hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables);
3249
	I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
3250
}
3251

3252
static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
3253 3254 3255 3256
{
	u32 hotplug;

	hotplug = I915_READ(SHOTPLUG_CTL_TC);
3257 3258 3259 3260 3261 3262 3263
	hotplug &= ~(ICP_TC_HPD_ENABLE(HPD_PORT_TC1) |
		     ICP_TC_HPD_ENABLE(HPD_PORT_TC2) |
		     ICP_TC_HPD_ENABLE(HPD_PORT_TC3) |
		     ICP_TC_HPD_ENABLE(HPD_PORT_TC4) |
		     ICP_TC_HPD_ENABLE(HPD_PORT_TC5) |
		     ICP_TC_HPD_ENABLE(HPD_PORT_TC6));
	hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables);
3264
	I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
3265 3266
}

3267
static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
3268 3269 3270
{
	u32 hotplug_irqs, enabled_irqs;

3271
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3272
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3273

3274 3275
	if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
		I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3276

3277 3278
	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

3279 3280
	icp_ddi_hpd_detection_setup(dev_priv);
	icp_tc_hpd_detection_setup(dev_priv);
3281 3282
}

3283 3284
static u32 gen11_hotplug_enables(struct drm_i915_private *i915,
				 enum hpd_pin pin)
3285
{
3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296
	switch (pin) {
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
		return GEN11_HOTPLUG_CTL_ENABLE(pin);
	default:
		return 0;
	}
M
Matt Roper 已提交
3297 3298
}

3299 3300
static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
C
Clinton A Taylor 已提交
3301 3302 3303 3304 3305 3306 3307 3308 3309
	u32 val;

	val = I915_READ(SOUTH_CHICKEN1);
	val |= (INVERT_DDIA_HPD |
		INVERT_DDIB_HPD |
		INVERT_DDIC_HPD |
		INVERT_DDID_HPD);
	I915_WRITE(SOUTH_CHICKEN1, val);

3310
	icp_hpd_irq_setup(dev_priv);
3311 3312
}

3313
static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
3314 3315 3316 3317
{
	u32 hotplug;

	hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
3318 3319 3320 3321 3322 3323 3324
	hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
	hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
3325
	I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
3326 3327 3328 3329 3330
}

static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;
3331 3332

	hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
3333 3334 3335 3336 3337 3338 3339
	hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
	hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
3340
	I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
3341 3342 3343 3344 3345 3346 3347
}

static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;
	u32 val;

3348
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
3349
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
3350 3351 3352

	val = I915_READ(GEN11_DE_HPD_IMR);
	val &= ~hotplug_irqs;
3353
	val |= ~enabled_irqs & hotplug_irqs;
3354 3355 3356
	I915_WRITE(GEN11_DE_HPD_IMR, val);
	POSTING_READ(GEN11_DE_HPD_IMR);

3357 3358
	gen11_tc_hpd_detection_setup(dev_priv);
	gen11_tbt_hpd_detection_setup(dev_priv);
3359

3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_hpd_irq_setup(dev_priv);
}

/*
 * Map a hotplug pin to its PCH_PORT_HOTPLUG enable bit on SPT+ PCHs.
 * Returns 0 for pins not covered by that register.
 */
static u32 spt_hotplug_enables(struct drm_i915_private *i915,
			       enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_A:
		return PORTA_HOTPLUG_ENABLE;
	case HPD_PORT_B:
		return PORTB_HOTPLUG_ENABLE;
	case HPD_PORT_C:
		return PORTC_HOTPLUG_ENABLE;
	case HPD_PORT_D:
		return PORTD_HOTPLUG_ENABLE;
	default:
		return 0;
	}
}

/* PCH_PORT_HOTPLUG2 only carries the port E hotplug enable bit. */
static u32 spt_hotplug2_enables(struct drm_i915_private *i915,
				enum hpd_pin pin)
{
	if (pin == HPD_PORT_E)
		return PORTE_HOTPLUG_ENABLE;

	return 0;
}

3392
static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3393
{
3394 3395 3396 3397 3398 3399 3400 3401 3402
	u32 val, hotplug;

	/* Display WA #1179 WaHardHangonHotPlug: cnp */
	if (HAS_PCH_CNP(dev_priv)) {
		val = I915_READ(SOUTH_CHICKEN1);
		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
		val |= CHASSIS_CLK_REQ_DURATION(0xf);
		I915_WRITE(SOUTH_CHICKEN1, val);
	}
3403 3404 3405

	/* Enable digital hotplug on the PCH */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3406 3407 3408 3409 3410
	hotplug &= ~(PORTA_HOTPLUG_ENABLE |
		     PORTB_HOTPLUG_ENABLE |
		     PORTC_HOTPLUG_ENABLE |
		     PORTD_HOTPLUG_ENABLE);
	hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables);
3411 3412 3413
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);

	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3414 3415
	hotplug &= ~PORTE_HOTPLUG_ENABLE;
	hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables);
3416
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3417 3418
}

3419 3420 3421 3422
static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

3423 3424 3425
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);

3426
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3427
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3428 3429 3430 3431 3432 3433

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	spt_hpd_detection_setup(dev_priv);
}

3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445
/*
 * Map a hotplug pin to the CPU-side DIGITAL_PORT_HOTPLUG_CNTRL enable
 * bits on ILK+. Only port A has a north HPD line; other pins return 0.
 */
static u32 ilk_hotplug_enables(struct drm_i915_private *i915,
			       enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_A:
		return DIGITAL_PORTA_HOTPLUG_ENABLE |
			DIGITAL_PORTA_PULSE_DURATION_2ms;
	default:
		return 0;
	}
}

3446 3447 3448 3449 3450 3451 3452 3453 3454 3455
static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 * The pulse duration bits are reserved on HSW+.
	 */
	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3456 3457 3458
	hotplug &= ~(DIGITAL_PORTA_HOTPLUG_ENABLE |
		     DIGITAL_PORTA_PULSE_DURATION_MASK);
	hotplug |= intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables);
3459 3460 3461
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
}

3462
static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3463
{
3464
	u32 hotplug_irqs, enabled_irqs;
3465

3466 3467
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
3468

3469
	if (INTEL_GEN(dev_priv) >= 8)
3470
		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3471
	else
3472
		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3473

3474
	ilk_hpd_detection_setup(dev_priv);
3475

3476
	ibx_hpd_irq_setup(dev_priv);
3477 3478
}

3479 3480
static u32 bxt_hotplug_enables(struct drm_i915_private *i915,
			       enum hpd_pin pin)
3481
{
3482
	u32 hotplug;
3483

3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502 3503
	switch (pin) {
	case HPD_PORT_A:
		hotplug = PORTA_HOTPLUG_ENABLE;
		if (intel_bios_is_port_hpd_inverted(i915, PORT_A))
			hotplug |= BXT_DDIA_HPD_INVERT;
		return hotplug;
	case HPD_PORT_B:
		hotplug = PORTB_HOTPLUG_ENABLE;
		if (intel_bios_is_port_hpd_inverted(i915, PORT_B))
			hotplug |= BXT_DDIB_HPD_INVERT;
		return hotplug;
	case HPD_PORT_C:
		hotplug = PORTC_HOTPLUG_ENABLE;
		if (intel_bios_is_port_hpd_inverted(i915, PORT_C))
			hotplug |= BXT_DDIC_HPD_INVERT;
		return hotplug;
	default:
		return 0;
	}
}
3504

3505 3506 3507
static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;
3508

3509 3510 3511 3512 3513 3514 3515 3516
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTA_HOTPLUG_ENABLE |
		     PORTB_HOTPLUG_ENABLE |
		     PORTC_HOTPLUG_ENABLE |
		     BXT_DDIA_HPD_INVERT |
		     BXT_DDIB_HPD_INVERT |
		     BXT_DDIC_HPD_INVERT);
	hotplug |= intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables);
3517
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3518 3519
}

3520 3521 3522 3523
static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

3524
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
3525
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
3526 3527 3528

	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);

3529
	bxt_hpd_detection_setup(dev_priv);
3530 3531
}

3532 3533 3534 3535 3536 3537 3538 3539 3540 3541 3542
/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * Note that we currently do this after installing the interrupt handler,
 * but before we enable the master interrupt. That should be sufficient
 * to avoid races with the irq handler, assuming we have MSI. Shared legacy
 * interrupts could still race.
 */
3543
static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
P
Paulo Zanoni 已提交
3544
{
3545
	struct intel_uncore *uncore = &dev_priv->uncore;
3546
	u32 mask;
3547

3548
	if (HAS_PCH_NOP(dev_priv))
D
Daniel Vetter 已提交
3549 3550
		return;

3551
	if (HAS_PCH_IBX(dev_priv))
3552
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3553
	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3554
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3555 3556
	else
		mask = SDE_GMBUS_CPT;
3557

3558
	GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
P
Paulo Zanoni 已提交
3559 3560
}

3561
static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
3562
{
3563
	struct intel_uncore *uncore = &dev_priv->uncore;
3564 3565
	u32 display_mask, extra_mask;

3566
	if (INTEL_GEN(dev_priv) >= 7) {
3567
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3568
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
3569
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3570 3571
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_DP_A_HOTPLUG_IVB);
3572 3573
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3574 3575
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
3576
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
3577 3578
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_DP_A_HOTPLUG);
3579
	}
3580

3581
	if (IS_HASWELL(dev_priv)) {
3582
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3583 3584 3585
		display_mask |= DE_EDP_PSR_INT_HSW;
	}

3586 3587 3588
	if (IS_IRONLAKE_M(dev_priv))
		extra_mask |= DE_PCU_EVENT;

3589
	dev_priv->irq_mask = ~display_mask;
3590

3591
	ibx_irq_postinstall(dev_priv);
P
Paulo Zanoni 已提交
3592

3593 3594
	gen5_gt_irq_postinstall(&dev_priv->gt);

3595 3596
	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
		      display_mask | extra_mask);
3597 3598
}

3599 3600
/* Turn on VLV/CHV display interrupts; caller must hold irq_lock. */
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv)) {
		vlv_display_irq_reset(dev_priv);
		vlv_display_irq_postinstall(dev_priv);
	}
}

/* Turn off VLV/CHV display interrupts; caller must hold irq_lock. */
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		vlv_display_irq_reset(dev_priv);
}

3627

3628
static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
3629
{
3630
	gen5_gt_irq_postinstall(&dev_priv->gt);
J
Jesse Barnes 已提交
3631

3632
	spin_lock_irq(&dev_priv->irq_lock);
3633 3634
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
3635 3636
	spin_unlock_irq(&dev_priv->irq_lock);

J
Jesse Barnes 已提交
3637
	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3638
	POSTING_READ(VLV_MASTER_IER);
3639 3640
}

3641 3642
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
3643 3644
	struct intel_uncore *uncore = &dev_priv->uncore;

3645 3646
	u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
		GEN8_PIPE_CDCLK_CRC_DONE;
3647
	u32 de_pipe_enables;
3648
	u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
3649
	u32 de_port_enables;
3650
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
3651 3652
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
3653
	enum pipe pipe;
3654

3655 3656 3657
	if (INTEL_GEN(dev_priv) <= 10)
		de_misc_masked |= GEN8_DE_MISC_GSE;

3658 3659
	if (IS_GEN9_LP(dev_priv))
		de_port_masked |= BXT_DE_PORT_GMBUS;
R
Rodrigo Vivi 已提交
3660

3661 3662 3663 3664 3665 3666 3667
	if (INTEL_GEN(dev_priv) >= 11) {
		enum port port;

		if (intel_bios_is_dsi_present(dev_priv, &port))
			de_port_masked |= DSI0_TE | DSI1_TE;
	}

3668 3669 3670
	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

3671 3672 3673
	if (INTEL_GEN(dev_priv) >= 9)
		de_pipe_enables |= GEN9_PIPE_PLANE1_FLIP_DONE;

3674
	de_port_enables = de_port_masked;
3675
	if (IS_GEN9_LP(dev_priv))
3676 3677
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
3678
		de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;
3679

3680 3681 3682
	if (INTEL_GEN(dev_priv) >= 12) {
		enum transcoder trans;

3683
		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
		}
	} else {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
	}
3695

M
Mika Kahola 已提交
3696 3697
	for_each_pipe(dev_priv, pipe) {
		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
3698

3699
		if (intel_display_power_is_enabled(dev_priv,
3700
				POWER_DOMAIN_PIPE(pipe)))
3701
			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
3702 3703
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);
M
Mika Kahola 已提交
3704
	}
3705

3706 3707
	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
3708

3709 3710
	if (INTEL_GEN(dev_priv) >= 11) {
		u32 de_hpd_masked = 0;
3711 3712
		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
				     GEN11_DE_TBT_HOTPLUG_MASK;
3713

3714 3715
		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
			      de_hpd_enables);
3716
	}
3717 3718
}

3719
static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
3720
{
3721
	if (HAS_PCH_SPLIT(dev_priv))
3722
		ibx_irq_postinstall(dev_priv);
P
Paulo Zanoni 已提交
3723

3724
	gen8_gt_irq_postinstall(&dev_priv->gt);
3725 3726
	gen8_de_irq_postinstall(dev_priv);

3727
	gen8_master_intr_enable(dev_priv->uncore.regs);
3728 3729
}

3730
static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
3731
{
3732
	struct intel_uncore *uncore = &dev_priv->uncore;
3733 3734
	u32 mask = SDE_GMBUS_ICP;

3735
	GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
3736 3737
}

3738
static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
M
Mika Kuoppala 已提交
3739
{
3740
	struct intel_uncore *uncore = &dev_priv->uncore;
3741
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;
M
Mika Kuoppala 已提交
3742

3743
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3744
		icp_irq_postinstall(dev_priv);
3745

3746
	gen11_gt_irq_postinstall(&dev_priv->gt);
M
Mika Kuoppala 已提交
3747 3748
	gen8_de_irq_postinstall(dev_priv);

3749
	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
3750

M
Mika Kuoppala 已提交
3751 3752
	I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);

3753 3754 3755 3756 3757 3758 3759
	if (HAS_MASTER_UNIT_IRQ(dev_priv)) {
		dg1_master_intr_enable(uncore->regs);
		POSTING_READ(DG1_MSTR_UNIT_INTR);
	} else {
		gen11_master_intr_enable(uncore->regs);
		POSTING_READ(GEN11_GFX_MSTR_IRQ);
	}
M
Mika Kuoppala 已提交
3760 3761
}

3762
static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
3763
{
3764
	gen8_gt_irq_postinstall(&dev_priv->gt);
3765

3766
	spin_lock_irq(&dev_priv->irq_lock);
3767 3768
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
3769 3770
	spin_unlock_irq(&dev_priv->irq_lock);

3771
	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3772 3773 3774
	POSTING_READ(GEN8_MASTER_IRQ);
}

3775
static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
L
Linus Torvalds 已提交
3776
{
3777
	struct intel_uncore *uncore = &dev_priv->uncore;
3778

3779 3780
	i9xx_pipestat_irq_reset(dev_priv);

3781
	GEN2_IRQ_RESET(uncore);
3782
	dev_priv->irq_mask = ~0u;
C
Chris Wilson 已提交
3783 3784
}

3785
static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
C
Chris Wilson 已提交
3786
{
3787
	struct intel_uncore *uncore = &dev_priv->uncore;
3788
	u16 enable_mask;
C
Chris Wilson 已提交
3789

3790 3791 3792 3793
	intel_uncore_write16(uncore,
			     EMR,
			     ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH));
C
Chris Wilson 已提交
3794 3795 3796 3797

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3798 3799
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);
C
Chris Wilson 已提交
3800

3801 3802 3803
	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3804
		I915_MASTER_ERROR_INTERRUPT |
3805 3806
		I915_USER_INTERRUPT;

3807
	GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);
C
Chris Wilson 已提交
3808

3809 3810
	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
3811
	spin_lock_irq(&dev_priv->irq_lock);
3812 3813
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3814
	spin_unlock_irq(&dev_priv->irq_lock);
C
Chris Wilson 已提交
3815 3816
}

3817
/* Ack gen2 error interrupts; returns current and stuck EIR bits via args. */
static void i8xx_error_irq_ack(struct drm_i915_private *i915,
			       u16 *eir, u16 *eir_stuck)
{
	struct intel_uncore *uncore = &i915->uncore;
	u16 emr;

	*eir = intel_uncore_read16(uncore, EIR);

	if (*eir)
		intel_uncore_write16(uncore, EIR, *eir);

	*eir_stuck = intel_uncore_read16(uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_read16(uncore, EMR);
	intel_uncore_write16(uncore, EMR, 0xffff);
	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
}

/* Log gen2 master error state acked by i8xx_error_irq_ack(). */
static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u16 eir, u16 eir_stuck)
{
	DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
			eir_stuck);
}

/* Ack gen3+ error interrupts; returns current and stuck EIR bits via args. */
static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	*eir = I915_READ(EIR);

	I915_WRITE(EIR, *eir);

	*eir_stuck = I915_READ(EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = I915_READ(EMR);
	I915_WRITE(EMR, 0xffffffff);
	I915_WRITE(EMR, emr | *eir_stuck);
}

/*
 * Report a gen3+ master error previously latched by i9xx_error_irq_ack():
 * @eir is the raw error status, @eir_stuck the bits that could not be
 * cleared and were therefore masked off in EMR.
 */
static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u32 eir, u32 eir_stuck)
{
	/* Use device-aware drm_dbg() consistently (DRM_DEBUG is deprecated). */
	drm_dbg(&dev_priv->drm, "Master Error, EIR 0x%08x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
			eir_stuck);
}

/*
 * Gen2 top-level interrupt handler.
 *
 * Acks all pending status (pipestat, error) first, clears IIR, and only
 * then dispatches to the individual handlers. The single-iteration
 * do/while keeps the ack/handle sequence in one scope with early break
 * when no interrupt is pending.
 */
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u16 eir = 0, eir_stuck = 0;
		u16 iir;

		/* Gen2 has 16-bit interrupt registers, hence the u16 accessors */
		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		/* All sources acked above; clear IIR before handling them */
		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);

		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

/* Disable and clear all gen3 interrupt sources. */
static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		/* Write back the read value to clear the latched hotplug
		 * status (presumably write-1-to-clear — matches the other
		 * PORT_HOTPLUG_STAT users in this file). */
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
	/* Everything masked until the next postinstall */
	dev_priv->irq_mask = ~0u;
}

/*
 * Program the gen3 interrupt registers: error mask, IMR/IER via
 * GEN3_IRQ_INIT, and the always-wanted pipestat events.
 */
static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	/* Only page-table and memory-refresh errors are unmasked */
	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
			  I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}

/*
 * Gen3 top-level interrupt handler.
 *
 * Acks all pending status (hotplug, pipestat, error) first, clears IIR,
 * and only then dispatches to the individual handlers.
 */
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		/* All sources acked above; clear IIR before handling them */
		I915_WRITE(GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

/* Disable and clear all gen4 interrupt sources. */
static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	/* Write back the read value to clear the latched hotplug status
	 * (presumably write-1-to-clear — matches the other users). */
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
	/* Everything masked until the next postinstall */
	dev_priv->irq_mask = ~0u;
}

/*
 * Program the gen4 interrupt registers: error mask (with extra G4x error
 * sources), IMR/IER via GEN3_IRQ_INIT, and the always-wanted pipestat
 * events (including GMBUS on pipe A).
 */
static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;
	u32 error_mask;

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	/* G4x additionally has the BSD (video) ring */
	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}

/*
 * Program PORT_HOTPLUG_EN for the pre-ilk GMCH platforms.
 * Caller must hold dev_priv->irq_lock (enforced below).
 */
static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/* Programming the CRT detection parameters tends
	   to generate a spurious hotplug event about three
	   seconds later.  So just do it once.
	*/
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}

/*
 * Gen4 top-level interrupt handler.
 *
 * Same shape as i915_irq_handler(): ack all pending status (hotplug,
 * pipestat, error) first, clear IIR, then dispatch. Gen4 additionally
 * signals the BSD (video) engine and has unconditional hotplug support.
 */
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		/* All sources acked above; clear IIR before handling them */
		I915_WRITE(GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);

		if (iir & I915_BSD_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->gt.engine[VCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not setup the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int i;

	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	/* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
	if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11)
		dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;

	/* Everything below only matters when a display is present */
	if (!HAS_DISPLAY(dev_priv))
		return;

	intel_hpd_init_pins(dev_priv);

	intel_hpd_init_work(dev_priv);

	dev->vblank_disable_immediate = true;

	/* Most platforms treat the display irq block as an always-on
	 * power domain. vlv/chv can disable it at runtime and need
	 * special care to avoid writing any of the display block registers
	 * outside of the power domain. We defer setting up the display irqs
	 * in this case to the runtime pm.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	/* If we have MST support, we want to avoid doing short HPD IRQ storm
	 * detection, as short HPD storms will occur as a natural part of
	 * sideband messaging with MST.
	 * On older platforms however, IRQ storms can occur with both long and
	 * short pulses, as seen on some G4x systems.
	 */
	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);

	/* Pick the platform-specific hotplug setup vfunc, newest first */
	if (HAS_PCH_DG1(dev_priv))
		dev_priv->display.hpd_irq_setup = dg1_hpd_irq_setup;
	else if (INTEL_GEN(dev_priv) >= 11)
		dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
	else if (IS_GEN9_LP(dev_priv))
		dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
		dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
	else if (HAS_GMCH(dev_priv) && I915_HAS_HOTPLUG(dev_priv))
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	else
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
}

4257 4258 4259 4260 4261 4262 4263 4264 4265 4266 4267 4268 4269 4270
/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}

4271 4272 4273 4274 4275 4276 4277 4278 4279 4280 4281 4282 4283 4284
static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			return cherryview_irq_handler;
		else if (IS_VALLEYVIEW(dev_priv))
			return valleyview_irq_handler;
		else if (IS_GEN(dev_priv, 4))
			return i965_irq_handler;
		else if (IS_GEN(dev_priv, 3))
			return i915_irq_handler;
		else
			return i8xx_irq_handler;
	} else {
4285 4286
		if (HAS_MASTER_UNIT_IRQ(dev_priv))
			return dg1_irq_handler;
4287 4288 4289 4290 4291
		if (INTEL_GEN(dev_priv) >= 11)
			return gen11_irq_handler;
		else if (INTEL_GEN(dev_priv) >= 8)
			return gen8_irq_handler;
		else
4292
			return ilk_irq_handler;
4293 4294 4295 4296 4297 4298 4299 4300 4301 4302 4303 4304 4305 4306 4307 4308 4309 4310 4311 4312 4313 4314
	}
}

/* Dispatch to the platform-specific interrupt reset routine. */
static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
	/* Non-GMCH (PCH split / modern) platforms, newest first */
	if (!HAS_GMCH(dev_priv)) {
		if (INTEL_GEN(dev_priv) >= 11)
			gen11_irq_reset(dev_priv);
		else if (INTEL_GEN(dev_priv) >= 8)
			gen8_irq_reset(dev_priv);
		else
			ilk_irq_reset(dev_priv);
		return;
	}

	/* GMCH platforms, newest first */
	if (IS_CHERRYVIEW(dev_priv))
		cherryview_irq_reset(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_irq_reset(dev_priv);
	else if (IS_GEN(dev_priv, 4))
		i965_irq_reset(dev_priv);
	else if (IS_GEN(dev_priv, 3))
		i915_irq_reset(dev_priv);
	else
		i8xx_irq_reset(dev_priv);
}

/* Dispatch to the platform-specific interrupt postinstall routine. */
static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* Non-GMCH (PCH split / modern) platforms, newest first */
	if (!HAS_GMCH(dev_priv)) {
		if (INTEL_GEN(dev_priv) >= 11)
			gen11_irq_postinstall(dev_priv);
		else if (INTEL_GEN(dev_priv) >= 8)
			gen8_irq_postinstall(dev_priv);
		else
			ilk_irq_postinstall(dev_priv);
		return;
	}

	/* GMCH platforms, newest first */
	if (IS_CHERRYVIEW(dev_priv))
		cherryview_irq_postinstall(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_irq_postinstall(dev_priv);
	else if (IS_GEN(dev_priv, 4))
		i965_irq_postinstall(dev_priv);
	else if (IS_GEN(dev_priv, 3))
		i915_irq_postinstall(dev_priv);
	else
		i8xx_irq_postinstall(dev_priv);
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 *
 * Returns 0 on success, or the negative errno from request_irq() on failure.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	int irq = dev_priv->drm.pdev->irq;
	int ret;

	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	dev_priv->drm.irq_enabled = true;

	/* Quiesce all sources before the handler can be invoked */
	intel_irq_reset(dev_priv);

	ret = request_irq(irq, intel_irq_handler(dev_priv),
			  IRQF_SHARED, DRIVER_NAME, dev_priv);
	if (ret < 0) {
		dev_priv->drm.irq_enabled = false;
		return ret;
	}

	intel_irq_postinstall(dev_priv);

	return ret;
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	int irq = dev_priv->drm.pdev->irq;

	/*
	 * FIXME we can get called twice during driver probe
	 * error handling as well as during driver remove due to
	 * intel_modeset_driver_remove() calling us out of sequence.
	 * Would be nice if it didn't do that...
	 */
	if (!dev_priv->drm.irq_enabled)
		return;

	dev_priv->drm.irq_enabled = false;

	/* Mask/clear everything before releasing the IRQ line */
	intel_irq_reset(dev_priv);

	free_irq(irq, dev_priv);

	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	intel_irq_reset(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
	/* Wait out any handler invocation still in flight */
	intel_synchronize_irq(dev_priv);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	/* Mark enabled before touching the hardware, as in intel_irq_install() */
	dev_priv->runtime_pm.irqs_enabled = true;
	intel_irq_reset(dev_priv);
	intel_irq_postinstall(dev_priv);
}
4438 4439 4440 4441 4442 4443 4444 4445 4446 4447 4448 4449 4450 4451

/* Report whether driver interrupt handling is currently enabled. */
bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	/*
	 * We only use drm_irq_uninstall() at unload and VT switch, so
	 * this is the only thing we need to check.
	 */
	return dev_priv->runtime_pm.irqs_enabled;
}

/* Wait for any in-flight invocation of our interrupt handler to finish. */
void intel_synchronize_irq(struct drm_i915_private *i915)
{
	synchronize_irq(i915->drm.pdev->irq);
}