/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>
#include <drm/drm_irq.h>

#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"

#include "gt/intel_breadcrumbs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_pm.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

/*
 * Interrupt statistic for PMU. Increments the counter only if the
 * interrupt originated from the GPU, so interrupts from a device which
 * shares the interrupt line are not counted.
 */
static inline void pmu_irq_stats(struct drm_i915_private *i915,
				 irqreturn_t res)
{
	if (unlikely(res != IRQ_HANDLED))
		return;

	/*
	 * A clever compiler translates that into INC. A not so clever one
	 * should at least prevent store tearing.
	 */
	WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
}

typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
typedef u32 (*hotplug_enables_func)(struct drm_i915_private *i915,
				    enum hpd_pin pin);

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG,
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
	[HPD_PORT_B] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_B),
	[HPD_PORT_C] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_C),
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_TC1] = GEN11_TC_HOTPLUG(HPD_PORT_TC1) | GEN11_TBT_HOTPLUG(HPD_PORT_TC1),
	[HPD_PORT_TC2] = GEN11_TC_HOTPLUG(HPD_PORT_TC2) | GEN11_TBT_HOTPLUG(HPD_PORT_TC2),
	[HPD_PORT_TC3] = GEN11_TC_HOTPLUG(HPD_PORT_TC3) | GEN11_TBT_HOTPLUG(HPD_PORT_TC3),
	[HPD_PORT_TC4] = GEN11_TC_HOTPLUG(HPD_PORT_TC4) | GEN11_TBT_HOTPLUG(HPD_PORT_TC4),
	[HPD_PORT_TC5] = GEN11_TC_HOTPLUG(HPD_PORT_TC5) | GEN11_TBT_HOTPLUG(HPD_PORT_TC5),
	[HPD_PORT_TC6] = GEN11_TC_HOTPLUG(HPD_PORT_TC6) | GEN11_TBT_HOTPLUG(HPD_PORT_TC6),
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
	[HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1),
	[HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2),
	[HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3),
	[HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
	[HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5),
	[HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6),
};

static const u32 hpd_sde_dg1[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
	[HPD_PORT_D] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_D),
};

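/*
 * Pick the HPD pin tables for this platform: hpd->hpd for the
 * CPU/display side and hpd->pch_hpd for the PCH side. The PCH table
 * is left unset when there is no usable PCH HPD block.
 */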
static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
{
	struct i915_hotplug *hpd = &dev_priv->hotplug;

	if (HAS_GMCH(dev_priv)) {
		if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv))
			hpd->hpd = hpd_status_g4x;
		else
			hpd->hpd = hpd_status_i915;
		return;
	}

	if (INTEL_GEN(dev_priv) >= 11)
		hpd->hpd = hpd_gen11;
	else if (IS_GEN9_LP(dev_priv))
		hpd->hpd = hpd_bxt;
	else if (INTEL_GEN(dev_priv) >= 8)
		hpd->hpd = hpd_bdw;
	else if (INTEL_GEN(dev_priv) >= 7)
		hpd->hpd = hpd_ivb;
	else
		hpd->hpd = hpd_ilk;

	if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) &&
	    (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
		return;

	if (HAS_PCH_DG1(dev_priv))
		hpd->pch_hpd = hpd_sde_dg1;
	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		hpd->pch_hpd = hpd_icp;
	else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
		hpd->pch_hpd = hpd_spt;
	else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
		hpd->pch_hpd = hpd_cpt;
	else if (HAS_PCH_IBX(dev_priv))
		hpd->pch_hpd = hpd_ibx;
	else
		MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
}

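/* Look up the intel_crtc for @pipe and forward the vblank to the DRM core. */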
static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	drm_crtc_handle_vblank(&crtc->base);
}

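/*
 * Generic IMR/IER/IIR reset: mask everything, disable interrupt
 * generation, and clear any stale IIR bits (twice, since IIR can
 * queue up two events).
 */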
void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
		    i915_reg_t iir, i915_reg_t ier)
{
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	intel_uncore_write(uncore, ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}

void gen2_irq_reset(struct intel_uncore *uncore)
{
	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IMR);

	intel_uncore_write16(uncore, GEN2_IER, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}

static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(GEN2_IIR), val);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

void gen3_irq_init(struct intel_uncore *uncore,
		   i915_reg_t imr, u32 imr_val,
		   i915_reg_t ier, u32 ier_val,
		   i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}

void gen2_irq_init(struct intel_uncore *uncore,
		   u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     u32 mask,
				     u32 bits)
{
	u32 val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, bits & ~mask);

	val = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid that read-modify-write cycles
 * interfere, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    u32 interrupt_mask,
			    u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask &&
	    !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
		dev_priv->irq_mask = new_val;
		intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
		intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
	}
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;
	u32 old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 u32 interrupt_mask,
			 u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  u32 interrupt_mask,
				  u32 enabled_irq_mask)
{
	u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
	intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
}

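/*
 * Compute the PIPESTAT enable bits (the high half of the register)
 * matching the status bits currently enabled for @pipe.
 */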
u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (INTEL_GEN(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	drm_WARN_ONCE(&dev_priv->drm,
		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}

static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle)
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!i915_has_asle(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 i915_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	unsigned long irqflags;

	/*
	 * On i965gm TV output the frame counter only works up to
	 * the point when we enable the TV encoder. After that the
	 * frame counter ceases to work and reads zero. We need a
	 * vblank wait before enabling the TV encoder and so we
	 * have to enable vblank interrupts while the frame counter
	 * is still in a working state. However the core vblank code
	 * does not like us returning non-zero frame counter values
	 * when we've told it that we don't have a working frame
	 * counter. Thus we must stop non-zero values leaking out.
	 */
	if (!vblank->max_vblank_count)
		return 0;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = intel_de_read_fw(dev_priv, low_frame);
		high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	if (!vblank->max_vblank_count)
		return 0;

	return intel_uncore_read(&dev_priv->uncore, PIPE_FRMCOUNT_G4X(pipe));
}

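/*
 * Number of scanlines that have passed since the last frame timestamp,
 * computed from the pipe frame timestamp and the free-running
 * timestamp counter.
 */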
static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);

		scan_post_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	return div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
				   clock), 1000 * htotal);
}

/*
 * On certain encoders on certain platforms, the pipe scanline
 * register will not work to get the scanline, since the timings
 * are driven from the PORT, or there are issues with scanline
 * register updates.
 * This function will use the Framestamp and current
 * timestamp registers to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 scanline;

	scanline = intel_crtc_scanlines_since_frame_timestamp(crtc);
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}

/*
 * intel_de_read_fw(), only for fast reads of display block, no need for
 * forcewake etc.
 */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return -1;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	if (crtc->mode_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN(dev_priv, 2))
		position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank.  So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem.  We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

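/*
 * Sample the current scanout position for the vblank timestamp helper,
 * optionally bracketing the query with system timestamps. The returned
 * position is relative to vblank end: negative while in vblank,
 * positive during the active portion.
 */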
static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
				     bool in_vblank_irq,
				     int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_device *dev = _crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	enum pipe pipe = crtc->pipe;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;
	bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
		IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
		crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;

	if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
		drm_dbg(&dev_priv->drm,
			"trying to get scanoutpos for disabled "
			"pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
		int scanlines = intel_crtc_scanlines_since_frame_timestamp(crtc);

		position = __intel_get_crtc_scanline(crtc);

		/*
		 * Already exiting vblank? If so, shift our position
		 * so it looks like we're already approaching the full
		 * vblank end. This should make the generated timestamp
		 * more or less match when the active portion will start.
		 */
		if (position >= vbl_start && scanlines < position)
			position = min(crtc->vmax_vblank_start + scanlines, vtotal - 1);
	} else if (use_scanline_counter) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (use_scanline_counter) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}

bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
				     ktime_t *vblank_time, bool in_vblank_irq)
{
	return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
		crtc, max_error, vblank_time, in_vblank_irq,
		i915_get_crtc_scanoutpos);
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

/**
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	struct intel_gt *gt = &dev_priv->gt;
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL);
	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (drm_WARN_ON_ONCE(&dev_priv->drm,
				     slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = intel_uncore_read(&dev_priv->uncore, reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		intel_uncore_posting_read(&dev_priv->uncore, reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);

out:
	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
	spin_lock_irq(&gt->irq_lock);
	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&gt->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin);
	default:
		return false;
	}
}

static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
	case HPD_PORT_B:
	case HPD_PORT_C:
	case HPD_PORT_D:
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin);
	default:
		return false;
	}
}

static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
		return val & ICP_TC_HPD_LONG_DETECT(pin);
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
			       u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
{
	enum hpd_pin pin;

	BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);

	for_each_hpd_pin(pin) {
		if ((hpd[pin] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(pin);

		if (long_pulse_detect(pin, dig_hotplug_reg))
			*long_mask |= BIT(pin);
	}

	drm_dbg(&dev_priv->drm,
		"hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
		hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}

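/*
 * Per-encoder helpers: collect the HPD interrupt bits whose pins are
 * currently marked enabled, and the full set of bits used by any
 * encoder on this device.
 */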
static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}

static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 hotplug_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		hotplug_irqs |= hpd[encoder->hpd_pin];

	return hotplug_irqs;
}

static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
				     hotplug_enables_func hotplug_enables)
{
	struct intel_encoder *encoder;
	u32 hotplug = 0;

	for_each_intel_encoder(&i915->drm, encoder)
		hotplug |= hotplug_enables(i915, encoder->hpd_pin);

	return hotplug;
}

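/* GMBUS and DP AUX completions both wake waiters on gmbus_wait_queue. */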
static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

	trace_intel_pipe_crc(crtc, crcs);

	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	drm_crtc_add_crc_entry(&crtc->base, true,
				drm_crtc_accurate_vblank_count(&crtc->base),
				crcs);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
#endif

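/* Send the pending page flip completion event for @pipe under event_lock. */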
static void flip_done_handler(struct drm_i915_private *i915,
			      enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(i915, pipe);
	struct drm_crtc_state *crtc_state = crtc->base.state;
	struct drm_pending_vblank_event *e = crtc_state->event;
	struct drm_device *dev = &i915->drm;
	unsigned long irqflags;

	spin_lock_irqsave(&dev->event_lock, irqflags);

	crtc_state->event = NULL;

	drm_crtc_send_vblank_event(&crtc->base, e);

	spin_unlock_irqrestore(&dev->event_lock, irqflags);
}

static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 res1, res2;

	if (INTEL_GEN(dev_priv) >= 3)
		res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

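/*
 * Reset PIPESTAT on all pipes: ack any pending status bits, clear the
 * enable bits, and forget the software status mask.
 */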
static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe),
			   PIPESTAT_INT_STATUS_MASK |
			   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}

static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		default:
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			status_mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(pipe);
		pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
		 */
		if (pipe_stats[pipe]) {
			intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
			intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
		}
	}
	spin_unlock(&dev_priv->irq_lock);
}

static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}
}

static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);
}

static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
			flip_done_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = 0, hotplug_status_mask;
	int i;

	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
	else
		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;

	/*
	 * We absolutely have to clear all the pending interrupt
	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
	 * interrupt bit won't have an edge, and the i965/g4x
	 * edge triggered IIR will not notice that an interrupt
	 * is still pending. We can't use PORT_HOTPLUG_EN to
	 * guarantee the edge as the act of toggling the enable
	 * bits can itself generate a new hotplug interrupt :(
	 */
	for (i = 0; i < 10; i++) {
		u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT) & hotplug_status_mask;

		if (tmp == 0)
			return hotplug_status;

		hotplug_status |= tmp;
		intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, hotplug_status);
	}

	drm_WARN_ONCE(&dev_priv->drm, 1,
		      "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
		      intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));

	return hotplug_status;
}

static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;
	u32 hotplug_trigger;

	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
	else
		hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

	if (hotplug_trigger) {
		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug_trigger, hotplug_trigger,
				   dev_priv->hotplug.hpd,
				   i9xx_port_hotplug_long_detect);

		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
	}

	if ((IS_G4X(dev_priv) ||
	     IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
		dp_aux_irq_handler(dev_priv);
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
		pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
		ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
		intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);

		if (gt_iir)
			intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
		if (pm_iir)
			intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

		if (gt_iir)
			gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
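
/*
 * The sequence above, reduced to an illustrative sketch (pseudo-helpers,
 * not driver code): clearing the master/IER bits forces the 'x' term to
 * zero, so re-enabling produces a fresh 0->1 edge if anything is pending:
 *
 *	write(MASTER_IER, 0);
 *	ier = read(IER); write(IER, 0);
 *	... ack GTIIR/GEN6_PMIIR/VLV_IIR ...
 *	write(IER, ier);
 *	write(MASTER_IER, MASTER_INTERRUPT_ENABLE);
 */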

static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 master_ctl, iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
		ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
		intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);

		gen8_gt_irq_handler(&dev_priv->gt, master_ctl);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger)
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/*
	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
	 * unless we touch the hotplug register, even if hotplug_trigger is
	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
	 * errors.
	 */
	dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
	if (!hotplug_trigger) {
		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
			PORTD_HOTPLUG_STATUS_MASK |
			PORTC_HOTPLUG_STATUS_MASK |
			PORTB_HOTPLUG_STATUS_MASK;
		dig_hotplug_reg &= ~mask;
	}

	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
	if (!hotplug_trigger)
		return;

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
			   hotplug_trigger, dig_hotplug_reg,
			   dev_priv->hotplug.pch_hpd,
			   pch_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
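
/*
 * Illustrative call pattern, as in ibx_irq_handler() and
 * cpt_irq_handler() below: the trigger is pre-masked from SDEIIR, and
 * the call is made even when it is zero so the PCH_PORT_HOTPLUG write
 * still acks the PCH:
 *
 *	ibx_hpd_irq_handler(dev_priv, pch_iir & SDE_HOTPLUG_MASK);
 */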

static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
			port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		drm_err(&dev_priv->drm, "PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK) {
		for_each_pipe(dev_priv, pipe)
			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
				pipe_name(pipe),
				intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
	}

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		drm_dbg(&dev_priv->drm,
			"PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
}

static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
	u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		drm_err(&dev_priv->drm, "Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev_priv))
				ivb_pipe_crc_irq_handler(dev_priv, pipe);
			else
				hsw_pipe_crc_irq_handler(dev_priv, pipe);
		}
	}

	intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
	u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT);
	enum pipe pipe;

	if (serr_int & SERR_INT_POISON)
		drm_err(&dev_priv->drm, "PCH poison interrupt\n");

	for_each_pipe(dev_priv, pipe)
		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
			port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT) {
		for_each_pipe(dev_priv, pipe)
			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
				pipe_name(pipe),
				intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
	}

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev_priv);
}

static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP;
	u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP;
	u32 pin_mask = 0, long_mask = 0;

	if (ddi_hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI);
		intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   ddi_hotplug_trigger, dig_hotplug_reg,
				   dev_priv->hotplug.pch_hpd,
				   icp_ddi_port_hotplug_long_detect);
	}

	if (tc_hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC);
		intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   tc_hotplug_trigger, dig_hotplug_reg,
				   dev_priv->hotplug.pch_hpd,
				   icp_tc_port_hotplug_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_ICP)
		gmbus_irq_handler(dev_priv);
}

static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
	u32 pin_mask = 0, long_mask = 0;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
		intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug_trigger, dig_hotplug_reg,
				   dev_priv->hotplug.pch_hpd,
				   spt_port_hotplug_long_detect);
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2);
		intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug2_trigger, dig_hotplug_reg,
				   dev_priv->hotplug.pch_hpd,
				   spt_port_hotplug2_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);
}

static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger)
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL);
	intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
			   hotplug_trigger, dig_hotplug_reg,
			   dev_priv->hotplug.hpd,
			   ilk_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev_priv);

	if (de_iir & DE_POISON)
		drm_err(&dev_priv->drm, "Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			intel_handle_vblank(dev_priv, pipe);

		if (de_iir & DE_PLANE_FLIP_DONE(pipe))
			flip_done_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);

		if (HAS_PCH_CPT(dev_priv))
			cpt_irq_handler(dev_priv, pch_iir);
		else
			ibx_irq_handler(dev_priv, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
	}

	if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
		gen5_rps_irq_handler(&dev_priv->gt.rps);
}

static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev_priv);

	if (de_iir & DE_EDP_PSR_INT_HSW) {
		u32 psr_iir = intel_uncore_read(&dev_priv->uncore, EDP_PSR_IIR);

		intel_psr_irq_handler(dev_priv, psr_iir);
		intel_uncore_write(&dev_priv->uncore, EDP_PSR_IIR, psr_iir);
	}

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev_priv);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
			intel_handle_vblank(dev_priv, pipe);

		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
			flip_done_handler(dev_priv, pipe);
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);

		cpt_irq_handler(dev_priv, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
	}
}

/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *i915 = arg;
	void __iomem * const regs = i915->uncore.regs;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (unlikely(!intel_irqs_enabled(i915)))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	/* disable master interrupt before clearing iir */
	de_ier = raw_reg_read(regs, DEIER);
	raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(i915)) {
		sde_ier = raw_reg_read(regs, SDEIER);
		raw_reg_write(regs, SDEIER, 0);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = raw_reg_read(regs, GTIIR);
	if (gt_iir) {
		raw_reg_write(regs, GTIIR, gt_iir);
		if (INTEL_GEN(i915) >= 6)
			gen6_gt_irq_handler(&i915->gt, gt_iir);
		else
			gen5_gt_irq_handler(&i915->gt, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = raw_reg_read(regs, DEIIR);
	if (de_iir) {
		raw_reg_write(regs, DEIIR, de_iir);
		if (INTEL_GEN(i915) >= 7)
			ivb_display_irq_handler(i915, de_iir);
		else
			ilk_display_irq_handler(i915, de_iir);
		ret = IRQ_HANDLED;
	}

	if (INTEL_GEN(i915) >= 6) {
		u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
		if (pm_iir) {
			raw_reg_write(regs, GEN6_PMIIR, pm_iir);
			gen6_rps_irq_handler(&i915->gt.rps, pm_iir);
			ret = IRQ_HANDLED;
		}
	}

	raw_reg_write(regs, DEIER, de_ier);
	if (sde_ier)
		raw_reg_write(regs, SDEIER, sde_ier);

	pmu_irq_stats(i915, ret);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	enable_rpm_wakeref_asserts(&i915->runtime_pm);

	return ret;
}
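
/*
 * Condensed sketch of the five steps above (illustrative pseudo-helpers,
 * not driver code):
 *
 *	de_ier = read(DEIER); write(DEIER, de_ier & ~MASTER);	// 1
 *	iir = read(IIR);					// 2
 *	write(IIR, iir);					// 3
 *	handle(iir);						// 4
 *	write(DEIER, de_ier);					// 5
 */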

static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger)
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
			   hotplug_trigger, dig_hotplug_reg,
			   dev_priv->hotplug.hpd,
			   bxt_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
	u32 pin_mask = 0, long_mask = 0;
	u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
	u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;

	if (trigger_tc) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL);
		intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   trigger_tc, dig_hotplug_reg,
				   dev_priv->hotplug.hpd,
				   gen11_port_hotplug_long_detect);
	}

	if (trigger_tbt) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL);
		intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   trigger_tbt, dig_hotplug_reg,
				   dev_priv->hotplug.hpd,
				   gen11_port_hotplug_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
	else
		drm_err(&dev_priv->drm,
			"Unexpected DE HPD interrupt 0x%08x\n", iir);
}

static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
{
	u32 mask;

	if (INTEL_GEN(dev_priv) >= 12)
		return TGL_DE_PORT_AUX_DDIA |
			TGL_DE_PORT_AUX_DDIB |
			TGL_DE_PORT_AUX_DDIC |
			TGL_DE_PORT_AUX_USBC1 |
			TGL_DE_PORT_AUX_USBC2 |
			TGL_DE_PORT_AUX_USBC3 |
			TGL_DE_PORT_AUX_USBC4 |
			TGL_DE_PORT_AUX_USBC5 |
			TGL_DE_PORT_AUX_USBC6;

	mask = GEN8_AUX_CHANNEL_A;
	if (INTEL_GEN(dev_priv) >= 9)
		mask |= GEN9_AUX_CHANNEL_B |
			GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;

	if (IS_CNL_WITH_PORT_F(dev_priv) || IS_GEN(dev_priv, 11))
		mask |= CNL_AUX_CHANNEL_F;

	if (IS_GEN(dev_priv, 11))
		mask |= ICL_AUX_CHANNEL_E;

	return mask;
}

static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
{
	if (HAS_D12_PLANE_MINIMIZATION(dev_priv))
		return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
	else if (INTEL_GEN(dev_priv) >= 11)
		return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
	else if (INTEL_GEN(dev_priv) >= 9)
		return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
	else
		return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
}

static void
gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
	bool found = false;

	if (iir & GEN8_DE_MISC_GSE) {
		intel_opregion_asle_intr(dev_priv);
		found = true;
	}

	if (iir & GEN8_DE_EDP_PSR) {
		u32 psr_iir;
		i915_reg_t iir_reg;

		if (INTEL_GEN(dev_priv) >= 12)
			iir_reg = TRANS_PSR_IIR(dev_priv->psr.transcoder);
		else
			iir_reg = EDP_PSR_IIR;

		psr_iir = intel_uncore_read(&dev_priv->uncore, iir_reg);
		intel_uncore_write(&dev_priv->uncore, iir_reg, psr_iir);

		if (psr_iir)
			found = true;

		intel_psr_irq_handler(dev_priv, psr_iir);
	}

	if (!found)
		drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
}

static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
					   u32 te_trigger)
{
	enum pipe pipe = INVALID_PIPE;
	enum transcoder dsi_trans;
	enum port port;
	u32 val, tmp;

	/*
	 * In case of dual link, TE comes from DSI_1;
	 * this is to check if dual link is enabled.
	 */
	val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
	val &= PORT_SYNC_MODE_ENABLE;

	/*
	 * If dual link is enabled, then read DSI_0
	 * transcoder registers.
	 */
	port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
						  PORT_A : PORT_B;
	dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;

	/* Check if DSI configured in command mode */
	val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
	val = val & OP_MODE_MASK;

	if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
		drm_err(&dev_priv->drm, "DSI transcoder not configured in command mode\n");
		return;
	}

	/* Get PIPE for handling VBLANK event */
	val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans));
	switch (val & TRANS_DDI_EDP_INPUT_MASK) {
	case TRANS_DDI_EDP_INPUT_A_ON:
		pipe = PIPE_A;
		break;
	case TRANS_DDI_EDP_INPUT_B_ONOFF:
		pipe = PIPE_B;
		break;
	case TRANS_DDI_EDP_INPUT_C_ONOFF:
		pipe = PIPE_C;
		break;
	default:
		drm_err(&dev_priv->drm, "Invalid PIPE\n");
		return;
	}

	intel_handle_vblank(dev_priv, pipe);

	/* clear TE in dsi IIR */
	port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
	tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port));
	intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp);
}

static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 9)
		return GEN9_PIPE_PLANE1_FLIP_DONE;
	else
		return GEN8_PIPE_PRIMARY_FLIP_DONE;
}

static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;
	u32 iir;
	enum pipe pipe;

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
		if (iir) {
			intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
			ret = IRQ_HANDLED;
			gen8_de_misc_irq_handler(dev_priv, iir);
		} else {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE MISC)!\n");
		}
	}

	if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
		iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
		if (iir) {
			intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
			ret = IRQ_HANDLED;
			gen11_hpd_irq_handler(dev_priv, iir);
		} else {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE HPD)!\n");
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
		if (iir) {
			bool found = false;

			intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);
			ret = IRQ_HANDLED;

			if (iir & gen8_de_port_aux_mask(dev_priv)) {
				dp_aux_irq_handler(dev_priv);
				found = true;
			}

			if (IS_GEN9_LP(dev_priv)) {
				u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
					found = true;
				}
			}

			if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
				gmbus_irq_handler(dev_priv);
				found = true;
			}

			if (INTEL_GEN(dev_priv) >= 11) {
				u32 te_trigger = iir & (DSI0_TE | DSI1_TE);

				if (te_trigger) {
					gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
					found = true;
				}
			}

			if (!found)
				drm_err(&dev_priv->drm,
					"Unexpected DE Port interrupt\n");
		} else {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE PORT)!\n");
		}
	}

	for_each_pipe(dev_priv, pipe) {
		u32 fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE PIPE)!\n");
			continue;
		}

		ret = IRQ_HANDLED;
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK)
			intel_handle_vblank(dev_priv, pipe);

		if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
			flip_done_handler(dev_priv, pipe);

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev_priv, pipe);

		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
		if (fault_errors)
			drm_err(&dev_priv->drm,
				"Fault errors on pipe %c: 0x%08x\n",
				pipe_name(pipe),
				fault_errors);
	}

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
		if (iir) {
			intel_uncore_write(&dev_priv->uncore, SDEIIR, iir);
			ret = IRQ_HANDLED;

			if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
				icp_irq_handler(dev_priv, iir);
			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
				spt_irq_handler(dev_priv, iir);
			else
				cpt_irq_handler(dev_priv, iir);
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			drm_dbg(&dev_priv->drm,
				"The master control interrupt lied (SDE)!\n");
		}
	}

	return ret;
}

static inline u32 gen8_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN8_MASTER_IRQ);
}

static inline void gen8_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
}
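
/*
 * Canonical pairing, as used by gen8_irq_handler() below: sample the
 * master control with the master disabled and re-enable on every path:
 *
 *	master_ctl = gen8_master_intr_disable(regs);
 *	if (!master_ctl) {
 *		gen8_master_intr_enable(regs);
 *		return IRQ_NONE;
 *	}
 *	... ack and handle the individual IIRs ...
 *	gen8_master_intr_enable(regs);
 */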

static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	void __iomem * const regs = dev_priv->uncore.regs;
	u32 master_ctl;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	master_ctl = gen8_master_intr_disable(regs);
	if (!master_ctl) {
		gen8_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen8_gt_irq_handler(&dev_priv->gt, master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & ~GEN8_GT_IRQS) {
		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
		gen8_de_irq_handler(dev_priv, master_ctl);
		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
	}

	gen8_master_intr_enable(regs);

	pmu_irq_stats(dev_priv, IRQ_HANDLED);

	return IRQ_HANDLED;
}

static u32
gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 iir;

	if (!(master_ctl & GEN11_GU_MISC_IRQ))
		return 0;

	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
	if (likely(iir))
		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);

	return iir;
}

static void
gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
{
	if (iir & GEN11_GU_MISC_GSE)
		intel_opregion_asle_intr(gt->i915);
}

static inline u32 gen11_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
}

static inline void gen11_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}

static void
gen11_display_irq_handler(struct drm_i915_private *i915)
{
	void __iomem * const regs = i915->uncore.regs;
	const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);

	disable_rpm_wakeref_asserts(&i915->runtime_pm);
	/*
	 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
	 * for the display related bits.
	 */
	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
	gen8_de_irq_handler(i915, disp_ctl);
	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
		      GEN11_DISPLAY_IRQ_ENABLE);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);
}

static __always_inline irqreturn_t
__gen11_irq_handler(struct drm_i915_private * const i915,
		    u32 (*intr_disable)(void __iomem * const regs),
		    void (*intr_enable)(void __iomem * const regs))
{
	void __iomem * const regs = i915->uncore.regs;
	struct intel_gt *gt = &i915->gt;
	u32 master_ctl;
	u32 gu_misc_iir;

	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_ctl = intr_disable(regs);
	if (!master_ctl) {
		intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen11_gt_irq_handler(gt, master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & GEN11_DISPLAY_IRQ)
		gen11_display_irq_handler(i915);

	gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);

	intr_enable(regs);

	gen11_gu_misc_irq_handler(gt, gu_misc_iir);

	pmu_irq_stats(i915, IRQ_HANDLED);

	return IRQ_HANDLED;
}

static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
	return __gen11_irq_handler(arg,
				   gen11_master_intr_disable,
				   gen11_master_intr_enable);
}
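
/*
 * Design note: because __gen11_irq_handler() is __always_inline and the
 * intr_disable/intr_enable arguments here (and in dg1_irq_handler()
 * below) are compile-time constants, the compiler can specialize each
 * top-level handler and avoid indirect calls in the hot path.
 */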

static u32 dg1_master_intr_disable_and_ack(void __iomem * const regs)
{
	u32 val;

	/* First disable interrupts */
	raw_reg_write(regs, DG1_MSTR_UNIT_INTR, 0);

	/* Get the indication levels and ack the master unit */
	val = raw_reg_read(regs, DG1_MSTR_UNIT_INTR);
	if (unlikely(!val))
		return 0;

	raw_reg_write(regs, DG1_MSTR_UNIT_INTR, val);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt and ack them right away - we keep GEN11_MASTER_IRQ
	 * out as this bit doesn't exist anymore for DG1
	 */
	val = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ) & ~GEN11_MASTER_IRQ;
	if (unlikely(!val))
		return 0;

	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, val);

	return val;
}

static inline void dg1_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, DG1_MSTR_UNIT_INTR, DG1_MSTR_IRQ);
}

static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
	return __gen11_irq_handler(arg,
				   dg1_master_intr_disable_and_ack,
				   dg1_master_intr_enable);
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
int i8xx_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

int i915gm_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);

	/*
	 * Vblank interrupts fail to wake the device up from C2+.
	 * Disabling render clock gating during C-states avoids
	 * the problem. There is a small power cost so we do this
	 * only when vblank interrupts are actually enabled.
	 */
	if (dev_priv->vblank_enabled++ == 0)
		intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));

	return i8xx_enable_vblank(crtc);
}
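
/*
 * Illustrative pairing (not driver code): the vblank_enabled count above
 * must stay balanced with i915gm_disable_vblank() further below:
 *
 *	i915gm_enable_vblank(crtc);	// count 0->1: clock gating disabled
 *	...
 *	i915gm_disable_vblank(crtc);	// count 1->0: clock gating restored
 */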

int i965_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

int ilk_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* Even though there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated.
	 */
	if (HAS_PSR(dev_priv))
		drm_crtc_vblank_restore(crtc);

	return 0;
}

static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
				   bool enable)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	enum port port;
	u32 tmp;

	if (!(intel_crtc->mode_flags &
	    (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
		return false;

	/* for dual link cases we consider TE from slave */
	if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
		port = PORT_B;
	else
		port = PORT_A;

	tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_MASK_REG(port));
	if (enable)
		tmp &= ~DSI_TE_EVENT;
	else
		tmp |= DSI_TE_EVENT;

	intel_uncore_write(&dev_priv->uncore, DSI_INTR_MASK_REG(port), tmp);

	tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port));
	intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp);

	return true;
}
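
/*
 * Usage, as in bdw_enable_vblank()/bdw_disable_vblank() below: on
 * command-mode DSI panels the TE interrupt stands in for vblank, so the
 * callers bail out once this helper has claimed the event:
 *
 *	if (gen11_dsi_configure_te(intel_crtc, true))
 *		return 0;
 */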

int bdw_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	unsigned long irqflags;

	if (gen11_dsi_configure_te(intel_crtc, true))
		return 0;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* Even if there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated, so check only for PSR.
	 */
	if (HAS_PSR(dev_priv))
		drm_crtc_vblank_restore(crtc);

	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
void i8xx_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

void i915gm_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);

	i8xx_disable_vblank(crtc);

	if (--dev_priv->vblank_enabled == 0)
		intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}

void i965_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

void ilk_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

void bdw_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	unsigned long irqflags;

	if (gen11_dsi_configure_te(intel_crtc, false))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (HAS_PCH_NOP(dev_priv))
		return;

	GEN3_IRQ_RESET(uncore, SDE);

	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
}

static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (IS_CHERRYVIEW(dev_priv))
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);

	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
	intel_uncore_write(uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, VLV_);
	dev_priv->irq_mask = ~0u;
}

static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 pipestat_mask;
	u32 enable_mask;
	enum pipe pipe;

	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_LPE_PIPE_A_INTERRUPT |
		I915_LPE_PIPE_B_INTERRUPT;

	if (IS_CHERRYVIEW(dev_priv))
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
			I915_LPE_PIPE_C_INTERRUPT;

	drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);

	dev_priv->irq_mask = ~enable_mask;

	GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
}

/* drm_dma.h hooks
*/
static void ilk_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	GEN3_IRQ_RESET(uncore, DE);
	dev_priv->irq_mask = ~0u;

	if (IS_GEN(dev_priv, 7))
		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);

	if (IS_HASWELL(dev_priv)) {
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}

	gen5_gt_irq_reset(&dev_priv->gt);

	ibx_irq_reset(dev_priv);
}

static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);

	gen5_gt_irq_reset(&dev_priv->gt);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;

	gen8_master_intr_disable(dev_priv->uncore.regs);

	gen8_gt_irq_reset(&dev_priv->gt);

	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_reset(dev_priv);
}

static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);

	intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);

	if (INTEL_GEN(dev_priv) >= 12) {
		enum transcoder trans;

		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
			intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
		}
	} else {
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
	GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		GEN3_IRQ_RESET(uncore, SDE);

	/* Wa_14010685332:cnp/cmp,tgp,adp */
	if (INTEL_PCH_TYPE(dev_priv) == PCH_CNP ||
	    (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP &&
	     INTEL_PCH_TYPE(dev_priv) < PCH_DG1)) {
		intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
				 SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
		intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
				 SBCLK_RUN_REFCLK_DIS, 0);
	}
}

static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (HAS_MASTER_UNIT_IRQ(dev_priv))
		dg1_master_intr_disable_and_ack(dev_priv->uncore.regs);
	else
		gen11_master_intr_disable(dev_priv->uncore.regs);

	gen11_gt_irq_reset(&dev_priv->gt);
	gen11_display_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
}

void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
		gen8_de_pipe_flip_done_mask(dev_priv);
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
				  dev_priv->de_irq_mask[pipe],
				  ~dev_priv->de_irq_mask[pipe] | extra_ier);

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);
}

static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(&dev_priv->gt);

	GEN3_IRQ_RESET(uncore, GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static u32 ibx_hotplug_enables(struct drm_i915_private *i915,
			       enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_A:
		/*
		 * When CPU and PCH are on the same package, port A
		 * HPD must be enabled in both north and south.
		 */
		return HAS_PCH_LPT_LP(i915) ?
			PORTA_HOTPLUG_ENABLE : 0;
	case HPD_PORT_B:
		return PORTB_HOTPLUG_ENABLE |
			PORTB_PULSE_DURATION_2ms;
	case HPD_PORT_C:
		return PORTC_HOTPLUG_ENABLE |
			PORTC_PULSE_DURATION_2ms;
	case HPD_PORT_D:
		return PORTD_HOTPLUG_ENABLE |
			PORTD_PULSE_DURATION_2ms;
	default:
		return 0;
	}
}
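
/*
 * Illustrative sketch under an assumption: intel_hpd_hotplug_enables()
 * (defined elsewhere) is expected to OR the per-pin value from a
 * callback like the one above across all HPD pins, roughly:
 *
 *	u32 enables = 0;
 *	for_each_hpd_pin(pin)
 *		enables |= hotplug_enables(i915, pin);
 */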

static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on LPT+.
	 */
	hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTA_HOTPLUG_ENABLE |
		     PORTB_HOTPLUG_ENABLE |
		     PORTC_HOTPLUG_ENABLE |
		     PORTD_HOTPLUG_ENABLE |
		     PORTB_PULSE_DURATION_MASK |
		     PORTC_PULSE_DURATION_MASK |
		     PORTD_PULSE_DURATION_MASK);
	hotplug |= intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables);
	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
}

static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	ibx_hpd_detection_setup(dev_priv);
}

static u32 icp_ddi_hotplug_enables(struct drm_i915_private *i915,
				   enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_A:
	case HPD_PORT_B:
	case HPD_PORT_C:
	case HPD_PORT_D:
		return SHOTPLUG_CTL_DDI_HPD_ENABLE(pin);
	default:
		return 0;
	}
}

static u32 icp_tc_hotplug_enables(struct drm_i915_private *i915,
				  enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
		return ICP_TC_HPD_ENABLE(pin);
	default:
		return 0;
	}
}

static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI);
	hotplug &= ~(SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) |
		     SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) |
		     SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) |
		     SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D));
	hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables);
	intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, hotplug);
}

static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC);
	hotplug &= ~(ICP_TC_HPD_ENABLE(HPD_PORT_TC1) |
		     ICP_TC_HPD_ENABLE(HPD_PORT_TC2) |
		     ICP_TC_HPD_ENABLE(HPD_PORT_TC3) |
		     ICP_TC_HPD_ENABLE(HPD_PORT_TC4) |
		     ICP_TC_HPD_ENABLE(HPD_PORT_TC5) |
		     ICP_TC_HPD_ENABLE(HPD_PORT_TC6));
	hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables);
	intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, hotplug);
}

static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);

	if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
		intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	icp_ddi_hpd_detection_setup(dev_priv);
	icp_tc_hpd_detection_setup(dev_priv);
}

static u32 gen11_hotplug_enables(struct drm_i915_private *i915,
				 enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
		return GEN11_HOTPLUG_CTL_ENABLE(pin);
	default:
		return 0;
	}
}

static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1);
	val |= (INVERT_DDIA_HPD |
		INVERT_DDIB_HPD |
		INVERT_DDIC_HPD |
		INVERT_DDID_HPD);
	intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val);

	icp_hpd_irq_setup(dev_priv);
}

static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL);
	hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
	hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
	intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, hotplug);
}

static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL);
	hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
	hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
	intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, hotplug);
}

static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;
	u32 val;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);

	val = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
	val &= ~hotplug_irqs;
	val |= ~enabled_irqs & hotplug_irqs;
	intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IMR, val);
	intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);

	gen11_tc_hpd_detection_setup(dev_priv);
	gen11_tbt_hpd_detection_setup(dev_priv);

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_hpd_irq_setup(dev_priv);
}
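
/*
 * Worked example of the IMR update in gen11_hpd_irq_setup() above, with
 * made-up masks for illustration only: if hotplug_irqs == 0b1111 and
 * enabled_irqs == 0b0011, the read-modify-write first clears all four
 * bits in the IMR and then sets ~0b0011 & 0b1111 == 0b1100, leaving the
 * two enabled pins unmasked and the two unused pins masked. Bits outside
 * hotplug_irqs are never modified.
 */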

static u32 spt_hotplug_enables(struct drm_i915_private *i915,
			       enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_A:
		return PORTA_HOTPLUG_ENABLE;
	case HPD_PORT_B:
		return PORTB_HOTPLUG_ENABLE;
	case HPD_PORT_C:
		return PORTC_HOTPLUG_ENABLE;
	case HPD_PORT_D:
		return PORTD_HOTPLUG_ENABLE;
	default:
		return 0;
	}
}

static u32 spt_hotplug2_enables(struct drm_i915_private *i915,
				enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_E:
		return PORTE_HOTPLUG_ENABLE;
	default:
		return 0;
	}
}

static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 val, hotplug;

	/* Display WA #1179 WaHardHangonHotPlug: cnp */
	if (HAS_PCH_CNP(dev_priv)) {
		val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1);
		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
		val |= CHASSIS_CLK_REQ_DURATION(0xf);
		intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val);
	}

	/* Enable digital hotplug on the PCH */
	hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTA_HOTPLUG_ENABLE |
		     PORTB_HOTPLUG_ENABLE |
		     PORTC_HOTPLUG_ENABLE |
		     PORTD_HOTPLUG_ENABLE);
	hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables);
	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);

	hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2);
	hotplug &= ~PORTE_HOTPLUG_ENABLE;
	hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables);
	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, hotplug);
}

static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	spt_hpd_detection_setup(dev_priv);
}

static u32 ilk_hotplug_enables(struct drm_i915_private *i915,
			       enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_A:
		return DIGITAL_PORTA_HOTPLUG_ENABLE |
			DIGITAL_PORTA_PULSE_DURATION_2ms;
	default:
		return 0;
	}
}

static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on HSW+.
	 */
	hotplug = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL);
	hotplug &= ~(DIGITAL_PORTA_HOTPLUG_ENABLE |
		     DIGITAL_PORTA_PULSE_DURATION_MASK);
	hotplug |= intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables);
	intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
}

static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);

	if (INTEL_GEN(dev_priv) >= 8)
		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
	else
		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);

	ilk_hpd_detection_setup(dev_priv);

	ibx_hpd_irq_setup(dev_priv);
}

static u32 bxt_hotplug_enables(struct drm_i915_private *i915,
			       enum hpd_pin pin)
{
	u32 hotplug;

	switch (pin) {
	case HPD_PORT_A:
		hotplug = PORTA_HOTPLUG_ENABLE;
		if (intel_bios_is_port_hpd_inverted(i915, PORT_A))
			hotplug |= BXT_DDIA_HPD_INVERT;
		return hotplug;
	case HPD_PORT_B:
		hotplug = PORTB_HOTPLUG_ENABLE;
		if (intel_bios_is_port_hpd_inverted(i915, PORT_B))
			hotplug |= BXT_DDIB_HPD_INVERT;
		return hotplug;
	case HPD_PORT_C:
		hotplug = PORTC_HOTPLUG_ENABLE;
		if (intel_bios_is_port_hpd_inverted(i915, PORT_C))
			hotplug |= BXT_DDIC_HPD_INVERT;
		return hotplug;
	default:
		return 0;
	}
}

static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTA_HOTPLUG_ENABLE |
		     PORTB_HOTPLUG_ENABLE |
		     PORTC_HOTPLUG_ENABLE |
		     BXT_DDIA_HPD_INVERT |
		     BXT_DDIB_HPD_INVERT |
		     BXT_DDIC_HPD_INVERT);
	hotplug |= intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables);
	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
}

static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);

	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);

	bxt_hpd_detection_setup(dev_priv);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * Note that we currently do this after installing the interrupt handler,
 * but before we enable the master interrupt. That should be sufficient
 * to avoid races with the irq handler, assuming we have MSI. Shared legacy
 * interrupts could still race.
 */
static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 mask;

	if (HAS_PCH_NOP(dev_priv))
		return;

	if (HAS_PCH_IBX(dev_priv))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
	else
		mask = SDE_GMBUS_CPT;

	GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
}
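
/*
 * Illustration of the scheme described above, using one of the real
 * masks from ibx_irq_postinstall() but otherwise hypothetical: with
 * mask == SDE_GMBUS_CPT, GEN3_IRQ_INIT() leaves SDEIER == 0xffffffff
 * (every PCH source enabled) and SDEIMR == ~SDE_GMBUS_CPT (everything
 * but GMBUS masked). Further sources are later enabled by clearing
 * their SDEIMR bits only, so SDEIER never needs to be touched again.
 */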

static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 display_mask, extra_mask;

	if (INTEL_GEN(dev_priv) >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_PLANE_FLIP_DONE(PLANE_A) |
			      DE_PLANE_FLIP_DONE(PLANE_B) |
			      DE_DP_A_HOTPLUG);
	}

	if (IS_HASWELL(dev_priv)) {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
		display_mask |= DE_EDP_PSR_INT_HSW;
	}

	if (IS_IRONLAKE_M(dev_priv))
		extra_mask |= DE_PCU_EVENT;

	dev_priv->irq_mask = ~display_mask;

	ibx_irq_postinstall(dev_priv);

	gen5_gt_irq_postinstall(&dev_priv->gt);

	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
		      display_mask | extra_mask);
}
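
/*
 * Note the asymmetry in the GEN3_IRQ_INIT() call above, spelled out for
 * illustration: the IER is loaded with display_mask | extra_mask while
 * the IMR (~display_mask) only unmasks display_mask, so the extra_mask
 * sources (vblank, flip done, underrun, ...) stay masked here and are
 * unmasked individually at runtime, e.g. when vblank interrupts are
 * requested.
 */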

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv)) {
		vlv_display_irq_reset(dev_priv);
		vlv_display_irq_postinstall(dev_priv);
	}
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		vlv_display_irq_reset(dev_priv);
}

static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen5_gt_irq_postinstall(&dev_priv->gt);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
}

static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
		GEN8_PIPE_CDCLK_CRC_DONE;
	u32 de_pipe_enables;
	u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
	enum pipe pipe;

	if (INTEL_GEN(dev_priv) <= 10)
		de_misc_masked |= GEN8_DE_MISC_GSE;

	if (IS_GEN9_LP(dev_priv))
		de_port_masked |= BXT_DE_PORT_GMBUS;

	if (INTEL_GEN(dev_priv) >= 11) {
		enum port port;

		if (intel_bios_is_dsi_present(dev_priv, &port))
			de_port_masked |= DSI0_TE | DSI1_TE;
	}

	de_pipe_enables = de_pipe_masked |
		GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
		gen8_de_pipe_flip_done_mask(dev_priv);

	de_port_enables = de_port_masked;
	if (IS_GEN9_LP(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;

	if (INTEL_GEN(dev_priv) >= 12) {
		enum transcoder trans;

		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
		}
	} else {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
	}

	for_each_pipe(dev_priv, pipe) {
		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;

		if (intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);
	}

	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);

	if (INTEL_GEN(dev_priv) >= 11) {
		u32 de_hpd_masked = 0;
		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
				     GEN11_DE_TBT_HOTPLUG_MASK;

		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
			      de_hpd_enables);
	}
}

static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev_priv);

	gen8_gt_irq_postinstall(&dev_priv->gt);
	gen8_de_irq_postinstall(dev_priv);

	gen8_master_intr_enable(dev_priv->uncore.regs);
}

static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 mask = SDE_GMBUS_ICP;

	GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
}

static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);

	gen11_gt_irq_postinstall(&dev_priv->gt);
	gen8_de_irq_postinstall(dev_priv);

	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);

	if (HAS_MASTER_UNIT_IRQ(dev_priv)) {
		dg1_master_intr_enable(uncore->regs);
		intel_uncore_posting_read(&dev_priv->uncore, DG1_MSTR_UNIT_INTR);
	} else {
		gen11_master_intr_enable(uncore->regs);
		intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
	}
}

static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen8_gt_irq_postinstall(&dev_priv->gt);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
}

static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i9xx_pipestat_irq_reset(dev_priv);

	GEN2_IRQ_RESET(uncore);
	dev_priv->irq_mask = ~0u;
}

static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u16 enable_mask;

	intel_uncore_write16(uncore,
			     EMR,
			     ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void i8xx_error_irq_ack(struct drm_i915_private *i915,
			       u16 *eir, u16 *eir_stuck)
{
	struct intel_uncore *uncore = &i915->uncore;
	u16 emr;

	*eir = intel_uncore_read16(uncore, EIR);

	if (*eir)
		intel_uncore_write16(uncore, EIR, *eir);

	*eir_stuck = intel_uncore_read16(uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_read16(uncore, EMR);
	intel_uncore_write16(uncore, EMR, 0xffff);
	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
}
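
/*
 * A worked example of the ack sequence above, with invented register
 * values: if EIR reads 0x0005 and writing that back only clears bit 0,
 * the second read yields *eir_stuck == 0x0004. Toggling EMR to 0xffff
 * and back to emr | 0x0004 then produces the edge the ISR master error
 * bit needs, while keeping the stuck source masked from then on.
 */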

static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u16 eir, u16 eir_stuck)
{
	DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
			eir_stuck);
}

static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	*eir = intel_uncore_read(&dev_priv->uncore, EIR);

	intel_uncore_write(&dev_priv->uncore, EIR, *eir);

	*eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_read(&dev_priv->uncore, EMR);
	intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
	intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
}

static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u32 eir, u32 eir_stuck)
{
	DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
			eir_stuck);
}

static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u16 eir = 0, eir_stuck = 0;
		u16 iir;

		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);

		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
	}

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
	dev_priv->irq_mask = ~0u;
}

static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	intel_uncore_write(&dev_priv->uncore, EMR, ~(I915_ERROR_PAGE_TABLE |
			  I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
	dev_priv->irq_mask = ~0u;
}

static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;
	u32 error_mask;

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	intel_uncore_write(&dev_priv->uncore, EMR, error_mask);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}

static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/* Programming the CRT detection parameters tends
	   to generate a spurious hotplug event about three
	   seconds later.  So just do it once.
	*/
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);

		if (iir & I915_BSD_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->gt.engine[VCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int i;

	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	/* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
	if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11)
		dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;

	if (!HAS_DISPLAY(dev_priv))
		return;

	intel_hpd_init_pins(dev_priv);

	intel_hpd_init_work(dev_priv);

	dev->vblank_disable_immediate = true;

	/* Most platforms treat the display irq block as an always-on
	 * power domain. vlv/chv can disable it at runtime and need
	 * special care to avoid writing any of the display block registers
	 * outside of the power domain. We defer setting up the display irqs
	 * in this case to the runtime pm.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	/* If we have MST support, we want to avoid doing short HPD IRQ storm
	 * detection, as short HPD storms will occur as a natural part of
	 * sideband messaging with MST.
	 * On older platforms however, IRQ storms can occur with both long and
	 * short pulses, as seen on some G4x systems.
	 */
	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);

	if (HAS_GMCH(dev_priv)) {
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else {
		if (HAS_PCH_DG1(dev_priv))
			dev_priv->display.hpd_irq_setup = dg1_hpd_irq_setup;
		else if (INTEL_GEN(dev_priv) >= 11)
			dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
		else if (IS_GEN9_LP(dev_priv))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	}
}

/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}

static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			return cherryview_irq_handler;
		else if (IS_VALLEYVIEW(dev_priv))
			return valleyview_irq_handler;
		else if (IS_GEN(dev_priv, 4))
			return i965_irq_handler;
		else if (IS_GEN(dev_priv, 3))
			return i915_irq_handler;
		else
			return i8xx_irq_handler;
	} else {
		if (HAS_MASTER_UNIT_IRQ(dev_priv))
			return dg1_irq_handler;
		if (INTEL_GEN(dev_priv) >= 11)
			return gen11_irq_handler;
		else if (INTEL_GEN(dev_priv) >= 8)
			return gen8_irq_handler;
		else
			return ilk_irq_handler;
	}
}

static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_reset(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_reset(dev_priv);
		else if (IS_GEN(dev_priv, 4))
			i965_irq_reset(dev_priv);
		else if (IS_GEN(dev_priv, 3))
			i915_irq_reset(dev_priv);
		else
			i8xx_irq_reset(dev_priv);
	} else {
		if (INTEL_GEN(dev_priv) >= 11)
			gen11_irq_reset(dev_priv);
		else if (INTEL_GEN(dev_priv) >= 8)
			gen8_irq_reset(dev_priv);
		else
			ilk_irq_reset(dev_priv);
	}
}

static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_postinstall(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_postinstall(dev_priv);
		else if (IS_GEN(dev_priv, 4))
			i965_irq_postinstall(dev_priv);
		else if (IS_GEN(dev_priv, 3))
			i915_irq_postinstall(dev_priv);
		else
			i8xx_irq_postinstall(dev_priv);
	} else {
		if (INTEL_GEN(dev_priv) >= 11)
			gen11_irq_postinstall(dev_priv);
		else if (INTEL_GEN(dev_priv) >= 8)
			gen8_irq_postinstall(dev_priv);
		else
			ilk_irq_postinstall(dev_priv);
	}
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;
	int ret;

	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	dev_priv->drm.irq_enabled = true;

	intel_irq_reset(dev_priv);

	ret = request_irq(irq, intel_irq_handler(dev_priv),
			  IRQF_SHARED, DRIVER_NAME, dev_priv);
	if (ret < 0) {
		dev_priv->drm.irq_enabled = false;
		return ret;
	}

	intel_irq_postinstall(dev_priv);

	return ret;
}
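
/*
 * Sketch of the intended probe-time ordering (not a verbatim call
 * site): intel_irq_init() first to initialize work items and vtables,
 * intel_irq_install() next to request the IRQ line and run the
 * postinstall hooks, and only afterwards the hotplug handling that
 * this function deliberately leaves disabled.
 */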

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;

	/*
	 * FIXME we can get called twice during driver probe
	 * error handling as well as during driver remove due to
	 * intel_modeset_driver_remove() calling us out of sequence.
	 * Would be nice if it didn't do that...
	 */
	if (!dev_priv->drm.irq_enabled)
		return;

	dev_priv->drm.irq_enabled = false;

	intel_irq_reset(dev_priv);

	free_irq(irq, dev_priv);

	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	intel_irq_reset(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
	intel_synchronize_irq(dev_priv);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->runtime_pm.irqs_enabled = true;
	intel_irq_reset(dev_priv);
	intel_irq_postinstall(dev_priv);
}

bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	/*
	 * We only use drm_irq_uninstall() at unload and VT switch, so
	 * this is the only thing we need to check.
	 */
	return dev_priv->runtime_pm.irqs_enabled;
}

void intel_synchronize_irq(struct drm_i915_private *i915)
{
	synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
}