/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>
#include <drm/drm_irq.h>

#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"

#include "gt/intel_breadcrumbs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_pm.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

/*
 * Interrupt statistic for PMU. Increments the counter only if the
 * interrupt originated from the GPU so interrupts from a device which
 * shares the interrupt line are not accounted.
 */
static inline void pmu_irq_stats(struct drm_i915_private *i915,
				 irqreturn_t res)
{
	if (unlikely(res != IRQ_HANDLED))
		return;

	/*
	 * A clever compiler translates that into INC. A not so clever one
	 * should at least prevent store tearing.
	 */
	WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
}

typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
typedef u32 (*hotplug_enables_func)(struct drm_i915_private *i915,
				    enum hpd_pin pin);

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG,
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
	[HPD_PORT_B] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_B),
	[HPD_PORT_C] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_C),
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_TC1] = GEN11_TC_HOTPLUG(HPD_PORT_TC1) | GEN11_TBT_HOTPLUG(HPD_PORT_TC1),
	[HPD_PORT_TC2] = GEN11_TC_HOTPLUG(HPD_PORT_TC2) | GEN11_TBT_HOTPLUG(HPD_PORT_TC2),
	[HPD_PORT_TC3] = GEN11_TC_HOTPLUG(HPD_PORT_TC3) | GEN11_TBT_HOTPLUG(HPD_PORT_TC3),
	[HPD_PORT_TC4] = GEN11_TC_HOTPLUG(HPD_PORT_TC4) | GEN11_TBT_HOTPLUG(HPD_PORT_TC4),
	[HPD_PORT_TC5] = GEN11_TC_HOTPLUG(HPD_PORT_TC5) | GEN11_TBT_HOTPLUG(HPD_PORT_TC5),
	[HPD_PORT_TC6] = GEN11_TC_HOTPLUG(HPD_PORT_TC6) | GEN11_TBT_HOTPLUG(HPD_PORT_TC6),
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
	[HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1),
	[HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2),
	[HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3),
	[HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
	[HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5),
	[HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6),
};

static const u32 hpd_sde_dg1[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
	[HPD_PORT_D] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_D),
};

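/*
 * Pick the hotplug pin -> interrupt bit tables used on this platform,
 * including the PCH (south display) table where one is present.
 */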
static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
{
	struct i915_hotplug *hpd = &dev_priv->hotplug;

	if (HAS_GMCH(dev_priv)) {
		if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv))
			hpd->hpd = hpd_status_g4x;
		else
			hpd->hpd = hpd_status_i915;
		return;
	}

	if (INTEL_GEN(dev_priv) >= 11)
		hpd->hpd = hpd_gen11;
	else if (IS_GEN9_LP(dev_priv))
		hpd->hpd = hpd_bxt;
	else if (INTEL_GEN(dev_priv) >= 8)
		hpd->hpd = hpd_bdw;
	else if (INTEL_GEN(dev_priv) >= 7)
		hpd->hpd = hpd_ivb;
	else
		hpd->hpd = hpd_ilk;

	if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) &&
	    (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
		return;

	if (HAS_PCH_DG1(dev_priv))
		hpd->pch_hpd = hpd_sde_dg1;
	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		hpd->pch_hpd = hpd_icp;
	else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
		hpd->pch_hpd = hpd_spt;
	else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
		hpd->pch_hpd = hpd_cpt;
	else if (HAS_PCH_IBX(dev_priv))
		hpd->pch_hpd = hpd_ibx;
	else
		MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
}

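/* Hand a pipe's vblank interrupt on to the DRM core's vblank handling. */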
static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	drm_crtc_handle_vblank(&crtc->base);
}

void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
		    i915_reg_t iir, i915_reg_t ier)
{
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	intel_uncore_write(uncore, ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}

void gen2_irq_reset(struct intel_uncore *uncore)
{
	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IMR);

	intel_uncore_write16(uncore, GEN2_IER, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}

static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(GEN2_IIR), val);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

void gen3_irq_init(struct intel_uncore *uncore,
		   i915_reg_t imr, u32 imr_val,
		   i915_reg_t ier, u32 ier_val,
		   i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}

void gen2_irq_init(struct intel_uncore *uncore,
		   u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     u32 mask,
				     u32 bits)
{
	u32 val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, bits & ~mask);

	val = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid read-modify-write cycles
 * interfering, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    u32 interrupt_mask,
			    u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask &&
	    !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
		dev_priv->irq_mask = new_val;
		intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
		intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
	}
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;
	u32 old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 u32 interrupt_mask,
			 u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  u32 interrupt_mask,
				  u32 enabled_irq_mask)
{
	u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
	intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
}

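/*
 * Compute the PIPESTAT enable bits (the high 16 bits of the register) that
 * correspond to the status bits currently requested for @pipe.
 */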
u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (INTEL_GEN(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	drm_WARN_ONCE(&dev_priv->drm,
		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}

static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle)
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!i915_has_asle(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

639 640 641
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
642
u32 i915_get_vblank_counter(struct drm_crtc *crtc)
643
{
644 645
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
646
	const struct drm_display_mode *mode = &vblank->hwmode;
647
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
648
	i915_reg_t high_frame, low_frame;
649
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
650
	unsigned long irqflags;
651

	/*
	 * On i965gm TV output the frame counter only works up to
	 * the point when we enable the TV encoder. After that the
	 * frame counter ceases to work and reads zero. We need a
	 * vblank wait before enabling the TV encoder and so we
	 * have to enable vblank interrupts while the frame counter
	 * is still in a working state. However the core vblank code
	 * does not like us returning non-zero frame counter values
	 * when we've told it that we don't have a working frame
	 * counter. Thus we must stop non-zero values leaking out.
	 */
	if (!vblank->max_vblank_count)
		return 0;

666 667 668 669 670
	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
671

672 673 674 675 676 677
	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

678 679
	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);
680

681 682
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

683 684 685 686 687 688
	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
689 690 691
		high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = intel_de_read_fw(dev_priv, low_frame);
		high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
692 693
	} while (high1 != high2);

694 695
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

696
	high1 >>= PIPE_FRAME_HIGH_SHIFT;
697
	pixel = low & PIPE_PIXEL_MASK;
698
	low >>= PIPE_FRAME_LOW_SHIFT;
699 700 701 702 703 704

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
705
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
706 707
}

708
u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
709
{
710
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
711
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
712
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
713

714 715 716
	if (!vblank->max_vblank_count)
		return 0;

717
	return intel_uncore_read(&dev_priv->uncore, PIPE_FRMCOUNT_G4X(pipe));
718 719
}

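/*
 * Number of scanlines that have passed since the last start of vblank (when
 * PIPE_FRMTMSTMP was latched), derived from the frame timestamp and the
 * current TIMESTAMP_CTR value.
 */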
static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
728
	u32 scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads. We make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
742 743
		scan_prev_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));
744 745 746 747 748

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
749
		scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);
750

751 752
		scan_post_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));
753 754
	} while (scan_post_time != scan_prev_time);

	return div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
				   clock), 1000 * htotal);
}

/*
 * On certain encoders on certain platforms, pipe
 * scanline register will not work to get the scanline,
 * since the timings are driven from the PORT, or there are issues
 * with scanline register updates.
 * This function will use Framestamp and current
 * timestamp registers to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 scanline;

	scanline = intel_crtc_scanlines_since_frame_timestamp(crtc);
777 778 779 780 781 782
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}

783 784 785 786
/*
 * intel_de_read_fw(), only for fast reads of display block, no need for
 * forcewake etc.
 */
787 788 789
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
790
	struct drm_i915_private *dev_priv = to_i915(dev);
791 792
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
793
	enum pipe pipe = crtc->pipe;
794
	int position, vtotal;
795

796 797 798
	if (!crtc->active)
		return -1;

799 800 801
	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

802
	if (crtc->mode_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
803 804
		return __intel_get_crtc_scanline_from_timestamp(crtc);

805
	vtotal = mode->crtc_vtotal;
806 807 808
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

809
	if (IS_GEN(dev_priv, 2))
810
		position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
811
	else
812
		position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
813

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank.  So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem.  We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
826
	if (HAS_DDI(dev_priv) && !position) {
827 828 829 830
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
831
			temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

839
	/*
840 841
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
842
	 */
843
	return (position + crtc->scanline_offset) % vtotal;
844 845
}

846 847 848 849 850
static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
				     bool in_vblank_irq,
				     int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
851
{
852
	struct drm_device *dev = _crtc->dev;
853
	struct drm_i915_private *dev_priv = to_i915(dev);
854
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
855
	enum pipe pipe = crtc->pipe;
856
	int position;
857
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
858
	unsigned long irqflags;
859 860
	bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
		IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
861
		crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
862

863
	if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
864 865 866
		drm_dbg(&dev_priv->drm,
			"trying to get scanoutpos for disabled "
			"pipe %c\n", pipe_name(pipe));
867
		return false;
868 869
	}

870
	htotal = mode->crtc_htotal;
871
	hsync_start = mode->crtc_hsync_start;
872 873 874
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;
875

876 877 878 879 880 881
	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

882 883 884 885 886 887
	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
888

889 890 891 892 893 894
	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
		int scanlines = intel_crtc_scanlines_since_frame_timestamp(crtc);

		position = __intel_get_crtc_scanline(crtc);

		/*
		 * Already exiting vblank? If so, shift our position
		 * so it looks like we're already apporaching the full
		 * vblank end. This should make the generated timestamp
		 * more or less match when the active portion will start.
		 */
		if (position >= vbl_start && scanlines < position)
			position = min(crtc->vmax_vblank_start + scanlines, vtotal - 1);
	} else if (use_scanline_counter) {
909 910 911
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
912
		position = __intel_get_crtc_scanline(crtc);
913 914 915 916 917
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
918
		position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
919

920 921 922 923
		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;
924

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
947 948
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;
967

968
	if (use_scanline_counter) {
969 970 971 972 973 974
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}
975

976
	return true;
977 978
}

979 980 981 982 983
bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
				     ktime_t *vblank_time, bool in_vblank_irq)
{
	return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
		crtc, max_error, vblank_time, in_vblank_irq,
984
		i915_get_crtc_scanoutpos);
985 986
}

987 988
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
989
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

1000
/**
1001
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
1009
static void ivb_parity_work(struct work_struct *work)
1010
{
1011
	struct drm_i915_private *dev_priv =
1012
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
1013
	struct intel_gt *gt = &dev_priv->gt;
1014
	u32 error_status, row, bank, subbank;
1015
	char *parity_event[6];
1016 1017
	u32 misccpctl;
	u8 slice = 0;
1018 1019 1020 1021 1022

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
1023
	mutex_lock(&dev_priv->drm.struct_mutex);
1024

1025
	/* If we've screwed up tracking, just let the interrupt fire again */
1026
	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
1027 1028
		goto out;

1029 1030 1031
	misccpctl = intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL);
	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);
1032

1033
	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1034
		i915_reg_t reg;
1035

1036
		slice--;
1037 1038
		if (drm_WARN_ON_ONCE(&dev_priv->drm,
				     slice >= NUM_L3_SLICES(dev_priv)))
1039
			break;
1040

1041
		dev_priv->l3_parity.which_slice &= ~(1<<slice);
1042

1043
		reg = GEN7_L3CDERRST1(slice);
1044

1045
		error_status = intel_uncore_read(&dev_priv->uncore, reg);
1046 1047 1048 1049
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

1050 1051
		intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		intel_uncore_posting_read(&dev_priv->uncore, reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

1060
		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
1061
				   KOBJ_CHANGE, parity_event);
1062

1063 1064
		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);
1065

1066 1067 1068 1069 1070
		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}
1071

1072
	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);
1073

1074
out:
1075
	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
1076 1077 1078
	spin_lock_irq(&gt->irq_lock);
	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&gt->irq_lock);
1079

1080
	mutex_unlock(&dev_priv->drm.struct_mutex);
1081 1082
}

1083
static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1084
{
1085
	switch (pin) {
1086 1087 1088 1089 1090 1091
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
1092
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin);
1093 1094 1095 1096 1097
	default:
		return false;
	}
}

1098
static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1099
{
1100 1101
	switch (pin) {
	case HPD_PORT_A:
1102
		return val & PORTA_HOTPLUG_LONG_DETECT;
1103
	case HPD_PORT_B:
1104
		return val & PORTB_HOTPLUG_LONG_DETECT;
1105
	case HPD_PORT_C:
1106 1107 1108 1109 1110 1111
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

1112
static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1113
{
1114 1115 1116
	switch (pin) {
	case HPD_PORT_A:
	case HPD_PORT_B:
1117
	case HPD_PORT_C:
1118
	case HPD_PORT_D:
1119
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin);
1120 1121 1122 1123 1124
	default:
		return false;
	}
}

1125
static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1126
{
1127
	switch (pin) {
1128 1129 1130 1131 1132 1133
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
1134
		return val & ICP_TC_HPD_LONG_DETECT(pin);
1135 1136 1137 1138 1139
	default:
		return false;
	}
}

1140
static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
1141
{
1142 1143
	switch (pin) {
	case HPD_PORT_E:
1144 1145 1146 1147 1148 1149
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

1150
static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1151
{
1152 1153
	switch (pin) {
	case HPD_PORT_A:
1154
		return val & PORTA_HOTPLUG_LONG_DETECT;
1155
	case HPD_PORT_B:
1156
		return val & PORTB_HOTPLUG_LONG_DETECT;
1157
	case HPD_PORT_C:
1158
		return val & PORTC_HOTPLUG_LONG_DETECT;
1159
	case HPD_PORT_D:
1160 1161 1162 1163 1164 1165
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

1166
static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1167
{
1168 1169
	switch (pin) {
	case HPD_PORT_A:
1170 1171 1172 1173 1174 1175
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

1176
static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1177
{
1178 1179
	switch (pin) {
	case HPD_PORT_B:
1180
		return val & PORTB_HOTPLUG_LONG_DETECT;
1181
	case HPD_PORT_C:
1182
		return val & PORTC_HOTPLUG_LONG_DETECT;
1183
	case HPD_PORT_D:
1184 1185 1186
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
1187 1188 1189
	}
}

1190
static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1191
{
1192 1193
	switch (pin) {
	case HPD_PORT_B:
1194
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1195
	case HPD_PORT_C:
1196
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1197
	case HPD_PORT_D:
1198 1199 1200
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
1201 1202 1203
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
1211 1212 1213 1214
static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
			       u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
1215
			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
1216
{
1217
	enum hpd_pin pin;
1218

1219 1220
	BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);

1221 1222
	for_each_hpd_pin(pin) {
		if ((hpd[pin] & hotplug_trigger) == 0)
1223
			continue;
1224

1225
		*pin_mask |= BIT(pin);
1226

1227
		if (long_pulse_detect(pin, dig_hotplug_reg))
1228
			*long_mask |= BIT(pin);
1229 1230
	}

1231 1232 1233
	drm_dbg(&dev_priv->drm,
		"hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
		hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
1234 1235 1236

}

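/*
 * Build the mask of hotplug interrupt bits for the encoders whose HPD pins
 * are currently enabled (intel_hpd_enabled_irqs) or present at all
 * (intel_hpd_hotplug_irqs).
 */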
static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}

static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 hotplug_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		hotplug_irqs |= hpd[encoder->hpd_pin];

	return hotplug_irqs;
}

static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
				     hotplug_enables_func hotplug_enables)
{
	struct intel_encoder *encoder;
	u32 hotplug = 0;

	for_each_intel_encoder(&i915->drm, encoder)
		hotplug |= hotplug_enables(i915, encoder->hpd_pin);

	return hotplug;
}

1274
static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
1275
{
1276
	wake_up_all(&dev_priv->gmbus_wait_queue);
1277 1278
}

1279
static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
1280
{
1281
	wake_up_all(&dev_priv->gmbus_wait_queue);
1282 1283
}

1284
#if defined(CONFIG_DEBUG_FS)
1285 1286
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
1287 1288 1289
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
1290
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
1292
	struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
1293 1294 1295
	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

	trace_intel_pipe_crc(crtc, crcs);
1296

1297
	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
1310
		return;
	}
1312 1313 1314 1315 1316
	spin_unlock(&pipe_crc->lock);

	drm_crtc_add_crc_entry(&crtc->base, true,
				drm_crtc_accurate_vblank_count(&crtc->base),
				crcs);
1317
}
1318 1319
#else
static inline void
1320 1321
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
1322 1323 1324
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
1325 1326
#endif

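/* Deliver the pending page flip completion event for this pipe to userspace. */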
static void flip_done_handler(struct drm_i915_private *i915,
			      enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(i915, pipe);
	struct drm_crtc_state *crtc_state = crtc->base.state;
	struct drm_pending_vblank_event *e = crtc_state->event;
	struct drm_device *dev = &i915->drm;
	unsigned long irqflags;

	spin_lock_irqsave(&dev->event_lock, irqflags);

	crtc_state->event = NULL;

	drm_crtc_send_vblank_event(&crtc->base, e);

	spin_unlock_irqrestore(&dev->event_lock, irqflags);
}
1344

1345 1346
static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
1348
	display_pipe_crc_irq_handler(dev_priv, pipe,
1349
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
1350
				     0, 0, 0, 0);
}

1353 1354
static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
1355
{
1356
	display_pipe_crc_irq_handler(dev_priv, pipe,
1357 1358 1359 1360 1361
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
1362
}
1363

1364 1365
static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
1366
{
1367
	u32 res1, res2;
1368

1369
	if (INTEL_GEN(dev_priv) >= 3)
1370
		res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe));
1371 1372 1373
	else
		res1 = 0;

1374
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1375
		res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe));
1376 1377
	else
		res2 = 0;
1378

1379
	display_pipe_crc_irq_handler(dev_priv, pipe,
1380 1381 1382
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)),
1383
				     res1, res2);
1384
}
1385

1386 1387 1388 1389 1390
static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
1391
		intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe),
			   PIPESTAT_INT_STATUS_MASK |
			   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}

1399 1400
static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1401
{
1402
	enum pipe pipe;
1403

1404
	spin_lock(&dev_priv->irq_lock);
1405 1406 1407 1408 1409 1410

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

1411
	for_each_pipe(dev_priv, pipe) {
1412
		i915_reg_t reg;
1413
		u32 status_mask, enable_mask, iir_bit = 0;
1414

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */
1422 1423

		/* fifo underruns are filtered in the underrun handler. */
1424
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;
1425 1426

		switch (pipe) {
1427
		default:
1428 1429 1430 1431 1432 1433
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
1434 1435 1436
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
1437 1438
		}
		if (iir & iir_bit)
1439
			status_mask |= dev_priv->pipestat_irq_mask[pipe];
1440

1441
		if (!status_mask)
1442 1443 1444
			continue;

		reg = PIPESTAT(pipe);
1445
		pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
1446
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
1447 1448 1449

		/*
		 * Clear the PIPE*STAT regs before the IIR
1450 1451 1452 1453 1454 1455
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
1456
		 */
1457
		if (pipe_stats[pipe]) {
1458 1459
			intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
			intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
1460
		}
1461
	}
1462
	spin_unlock(&dev_priv->irq_lock);
1463 1464
}

static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1472
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}
}

static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1490
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);
}

static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1514
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

1533
static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1534 1535 1536
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;
1537

1538
	for_each_pipe(dev_priv, pipe) {
1539
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1540
			intel_handle_vblank(dev_priv, pipe);
1541

1542 1543 1544
		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
			flip_done_handler(dev_priv, pipe);

1545
		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1546
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1547

1548 1549
		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1550 1551 1552
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1553
		gmbus_irq_handler(dev_priv);
1554 1555
}

1556
static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
1557
{
	u32 hotplug_status = 0, hotplug_status_mask;
	int i;

	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
	else
		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
1567

	/*
	 * We absolutely have to clear all the pending interrupt
	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
	 * interrupt bit won't have an edge, and the i965/g4x
	 * edge triggered IIR will not notice that an interrupt
	 * is still pending. We can't use PORT_HOTPLUG_EN to
	 * guarantee the edge as the act of toggling the enable
	 * bits can itself generate a new hotplug interrupt :(
	 */
	for (i = 0; i < 10; i++) {
1578
		u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT) & hotplug_status_mask;
1579 1580 1581 1582 1583

		if (tmp == 0)
			return hotplug_status;

		hotplug_status |= tmp;
1584
		intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, hotplug_status);
1585 1586
	}

1587 1588
	drm_WARN_ONCE(&dev_priv->drm, 1,
		      "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
1589
		      intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
1590

1591 1592 1593
	return hotplug_status;
}

1594
static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1595 1596 1597
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;
1598
	u32 hotplug_trigger;
1599

1600 1601 1602 1603 1604
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
	else
		hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1605

1606 1607 1608 1609 1610
	if (hotplug_trigger) {
		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug_trigger, hotplug_trigger,
				   dev_priv->hotplug.hpd,
				   i9xx_port_hotplug_long_detect);
1611

1612
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1613
	}
1614 1615 1616 1617 1618

	if ((IS_G4X(dev_priv) ||
	     IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
		dp_aux_irq_handler(dev_priv);
1619 1620
}

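/*
 * Top level interrupt handler for Valleyview: acks and handles the display
 * interrupts here and forwards GT and PM interrupts to the GT helpers.
 */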
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
1623
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

1626 1627 1628
	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

1629
	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
1630
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1631

1632
	do {
1633
		u32 iir, gt_iir, pm_iir;
1634
		u32 pipe_stats[I915_MAX_PIPES] = {};
1635
		u32 hotplug_status = 0;
1636
		u32 ier = 0;
1637

1638 1639 1640
		gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
		pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
J
Jesse Barnes 已提交
1641 1642

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1643
			break;
J
Jesse Barnes 已提交
1644 1645 1646

		ret = IRQ_HANDLED;

1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659
		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
1660 1661 1662
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
		ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
		intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);
1663 1664

		if (gt_iir)
1665
			intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
1666
		if (pm_iir)
1667
			intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);
1668

1669
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
1670
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1671

1672 1673
		/* Call regardless, as some status bits might not be
		 * signalled in iir */
1674
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1675

1676 1677 1678 1679
		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

1680 1681 1682 1683 1684
		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
1685
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
1686

1687 1688
		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1689

1690
		if (gt_iir)
1691
			gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
1692
		if (pm_iir)
1693
			gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
1694

1695
		if (hotplug_status)
1696
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1697

1698
		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1699
	} while (0);
J
Jesse Barnes 已提交
1700

1701 1702
	pmu_irq_stats(dev_priv, ret);

1703
	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1704

J
Jesse Barnes 已提交
1705 1706 1707
	return ret;
}

1708 1709
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
1710
	struct drm_i915_private *dev_priv = arg;
1711 1712
	irqreturn_t ret = IRQ_NONE;

1713 1714 1715
	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

1716
	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
1717
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1718

1719
	do {
1720
		u32 master_ctl, iir;
1721
		u32 pipe_stats[I915_MAX_PIPES] = {};
1722
		u32 hotplug_status = 0;
1723 1724
		u32 ier = 0;

1725 1726
		master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
1727

1728 1729
		if (master_ctl == 0 && iir == 0)
			break;
1730

1731 1732
		ret = IRQ_HANDLED;

1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745
		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
1746 1747 1748
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
		ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
		intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);
1749

1750
		gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
1751

1752
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
1753
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1754

1755 1756
		/* Call regardless, as some status bits might not be
		 * signalled in iir */
1757
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1758

1759 1760 1761 1762 1763
		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

1764 1765 1766 1767 1768
		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
1769
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
1770

1771 1772
		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1773 1774

		if (hotplug_status)
1775
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1776

1777
		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1778
	} while (0);
1779

1780 1781
	pmu_irq_stats(dev_priv, ret);

1782
	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1783

1784 1785 1786
	return ret;
}

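/*
 * IBX/CPT PCH hotplug: always latch and write back PCH_PORT_HOTPLUG,
 * even with no trigger bits set, since the PCH otherwise never acks
 * the interrupt to the CPU.
 */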
static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1788
				u32 hotplug_trigger)
1789 1790 1791
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

1792 1793 1794 1795 1796 1797
	/*
	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
	 * unless we touch the hotplug register, even if hotplug_trigger is
	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
	 * errors.
	 */
1798
	dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
1799 1800 1801 1802 1803 1804 1805 1806
	if (!hotplug_trigger) {
		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
			PORTD_HOTPLUG_STATUS_MASK |
			PORTC_HOTPLUG_STATUS_MASK |
			PORTB_HOTPLUG_STATUS_MASK;
		dig_hotplug_reg &= ~mask;
	}

1807
	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
1808 1809
	if (!hotplug_trigger)
		return;
1810

1811 1812 1813
	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
			   hotplug_trigger, dig_hotplug_reg,
			   dev_priv->hotplug.pch_hpd,
1814 1815
			   pch_port_hotplug_long_detect);

1816
	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1817 1818
}

1819
static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1820
{
1821
	enum pipe pipe;
1822
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1823

1824
	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1825

1826 1827 1828
	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
1829 1830
		drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n",
			port_name(port));
1831
	}
1832

1833
	if (pch_iir & SDE_AUX_MASK)
1834
		dp_aux_irq_handler(dev_priv);
1835

1836
	if (pch_iir & SDE_GMBUS)
1837
		gmbus_irq_handler(dev_priv);
1838 1839

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
1840
		drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
1841 1842

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
1843
		drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
1844 1845

	if (pch_iir & SDE_POISON)
1846
		drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1847

1848
	if (pch_iir & SDE_FDI_MASK) {
1849
		for_each_pipe(dev_priv, pipe)
1850 1851
			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
				pipe_name(pipe),
1852
				intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
1853
	}
1854 1855

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1856
		drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
1857 1858

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1859 1860
		drm_dbg(&dev_priv->drm,
			"PCH transcoder CRC error interrupt\n");
1861 1862

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1863
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
1864 1865

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1866
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
1867 1868
}

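/*
 * IVB/HSW error interrupt: report poison errors, fan out per-pipe FIFO
 * underruns and CRC-done events, then clear GEN7_ERR_INT.
 */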
static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
	u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		drm_err(&dev_priv->drm, "Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev_priv))
				ivb_pipe_crc_irq_handler(dev_priv, pipe);
			else
				hsw_pipe_crc_irq_handler(dev_priv, pipe);
		}
	}

	intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
	u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT);
	enum pipe pipe;

	if (serr_int & SERR_INT_POISON)
		drm_err(&dev_priv->drm, "PCH poison interrupt\n");

	for_each_pipe(dev_priv, pipe)
		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int);
}

1907
static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1908
{
1909
	enum pipe pipe;
1910
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1911

1912
	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1913

1914 1915 1916
	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
1917 1918
		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
			port_name(port));
1919
	}
1920 1921

	if (pch_iir & SDE_AUX_MASK_CPT)
1922
		dp_aux_irq_handler(dev_priv);
1923 1924

	if (pch_iir & SDE_GMBUS_CPT)
1925
		gmbus_irq_handler(dev_priv);
1926 1927

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1928
		drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");
1929 1930

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1931
		drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
1932

1933
	if (pch_iir & SDE_FDI_MASK_CPT) {
1934
		for_each_pipe(dev_priv, pipe)
1935 1936
			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
				pipe_name(pipe),
1937
				intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
1938
	}
1939 1940

	if (pch_iir & SDE_ERROR_CPT)
1941
		cpt_serr_int_handler(dev_priv);
1942 1943
}

1944
static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1945
{
1946 1947
	u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP;
	u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP;
1948 1949 1950 1951 1952
	u32 pin_mask = 0, long_mask = 0;

	if (ddi_hotplug_trigger) {
		u32 dig_hotplug_reg;

1953 1954
		dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI);
		intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, dig_hotplug_reg);
1955 1956

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1957 1958
				   ddi_hotplug_trigger, dig_hotplug_reg,
				   dev_priv->hotplug.pch_hpd,
1959 1960 1961 1962 1963 1964
				   icp_ddi_port_hotplug_long_detect);
	}

	if (tc_hotplug_trigger) {
		u32 dig_hotplug_reg;

1965 1966
		dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC);
		intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, dig_hotplug_reg);
1967 1968

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1969 1970
				   tc_hotplug_trigger, dig_hotplug_reg,
				   dev_priv->hotplug.pch_hpd,
1971
				   icp_tc_port_hotplug_long_detect);
1972 1973 1974 1975 1976 1977 1978 1979 1980
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_ICP)
		gmbus_irq_handler(dev_priv);
}

1981
static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1982 1983 1984 1985 1986 1987 1988 1989 1990
{
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
	u32 pin_mask = 0, long_mask = 0;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg;

1991 1992
		dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
		intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
1993

1994
		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1995 1996
				   hotplug_trigger, dig_hotplug_reg,
				   dev_priv->hotplug.pch_hpd,
1997
				   spt_port_hotplug_long_detect);
1998 1999 2000 2001 2002
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg;

2003 2004
		dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2);
		intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, dig_hotplug_reg);
2005

2006
		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2007 2008
				   hotplug2_trigger, dig_hotplug_reg,
				   dev_priv->hotplug.pch_hpd,
2009 2010 2011 2012
				   spt_port_hotplug2_long_detect);
	}

	if (pin_mask)
2013
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2014 2015

	if (pch_iir & SDE_GMBUS_CPT)
2016
		gmbus_irq_handler(dev_priv);
2017 2018
}

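/*
 * CPU-side digital port hotplug for ILK/IVB/BDW: latch
 * DIGITAL_PORT_HOTPLUG_CNTRL and feed the decoded pins to the common
 * HPD handler.
 */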
static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger)
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL);
	intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
			   hotplug_trigger, dig_hotplug_reg,
			   dev_priv->hotplug.hpd,
			   ilk_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

2035 2036
static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
2037
{
2038
	enum pipe pipe;
2039 2040
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

2041
	if (hotplug_trigger)
2042
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2043 2044

	if (de_iir & DE_AUX_CHANNEL_A)
2045
		dp_aux_irq_handler(dev_priv);
2046 2047

	if (de_iir & DE_GSE)
2048
		intel_opregion_asle_intr(dev_priv);
2049 2050

	if (de_iir & DE_POISON)
2051
		drm_err(&dev_priv->drm, "Poison interrupt\n");
2052

2053
	for_each_pipe(dev_priv, pipe) {
2054
		if (de_iir & DE_PIPE_VBLANK(pipe))
2055
			intel_handle_vblank(dev_priv, pipe);
2056

2057 2058 2059
		if (de_iir & DE_PLANE_FLIP_DONE(pipe))
			flip_done_handler(dev_priv, pipe);

2060
		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2061
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2062

2063
		if (de_iir & DE_PIPE_CRC_DONE(pipe))
2064
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2065 2066 2067 2068
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
2069
		u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2070

2071 2072
		if (HAS_PCH_CPT(dev_priv))
			cpt_irq_handler(dev_priv, pch_iir);
2073
		else
2074
			ibx_irq_handler(dev_priv, pch_iir);
2075 2076

		/* should clear PCH hotplug event before clear CPU irq */
2077
		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
2078 2079
	}

2080
	if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
2081
		gen5_rps_irq_handler(&dev_priv->gt.rps);
2082 2083
}

2084 2085
static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
2086
{
2087
	enum pipe pipe;
2088 2089
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

2090
	if (hotplug_trigger)
2091
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2092 2093

	if (de_iir & DE_ERR_INT_IVB)
2094
		ivb_err_int_handler(dev_priv);
2095

2096
	if (de_iir & DE_EDP_PSR_INT_HSW) {
2097
		struct intel_encoder *encoder;
2098

2099
		for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2100 2101 2102 2103 2104 2105 2106 2107 2108 2109
			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

			u32 psr_iir = intel_uncore_read(&dev_priv->uncore,
							EDP_PSR_IIR);

			intel_psr_irq_handler(intel_dp, psr_iir);
			intel_uncore_write(&dev_priv->uncore,
					   EDP_PSR_IIR, psr_iir);
			break;
		}
2110
	}
2111

2112
	if (de_iir & DE_AUX_CHANNEL_A_IVB)
2113
		dp_aux_irq_handler(dev_priv);
2114 2115

	if (de_iir & DE_GSE_IVB)
2116
		intel_opregion_asle_intr(dev_priv);
2117

2118
	for_each_pipe(dev_priv, pipe) {
2119
		if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
2120
			intel_handle_vblank(dev_priv, pipe);
2121 2122 2123

		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
			flip_done_handler(dev_priv, pipe);
2124 2125 2126
	}

	/* check event from PCH */
2127
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2128
		u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2129

2130
		cpt_irq_handler(dev_priv, pch_iir);
2131 2132

		/* clear PCH hotplug event before clear CPU irq */
2133
		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
2134 2135 2136
	}
}

2137 2138 2139 2140 2141 2142 2143 2144
/*
 * To handle irqs with the minimum potential for races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
2145
static irqreturn_t ilk_irq_handler(int irq, void *arg)
2146
{
2147 2148
	struct drm_i915_private *i915 = arg;
	void __iomem * const regs = i915->uncore.regs;
2149
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2150
	irqreturn_t ret = IRQ_NONE;
2151

2152
	if (unlikely(!intel_irqs_enabled(i915)))
2153 2154
		return IRQ_NONE;

2155
	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2156
	disable_rpm_wakeref_asserts(&i915->runtime_pm);
2157

2158
	/* disable master interrupt before clearing iir  */
2159 2160
	de_ier = raw_reg_read(regs, DEIER);
	raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2161

2162 2163 2164 2165 2166
	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
2167 2168 2169
	if (!HAS_PCH_NOP(i915)) {
		sde_ier = raw_reg_read(regs, SDEIER);
		raw_reg_write(regs, SDEIER, 0);
2170
	}
2171

2172 2173
	/* Find, clear, then process each source of interrupt */

2174
	gt_iir = raw_reg_read(regs, GTIIR);
2175
	if (gt_iir) {
2176 2177 2178
		raw_reg_write(regs, GTIIR, gt_iir);
		if (INTEL_GEN(i915) >= 6)
			gen6_gt_irq_handler(&i915->gt, gt_iir);
2179
		else
2180 2181
			gen5_gt_irq_handler(&i915->gt, gt_iir);
		ret = IRQ_HANDLED;
2182 2183
	}

2184
	de_iir = raw_reg_read(regs, DEIIR);
2185
	if (de_iir) {
2186 2187 2188
		raw_reg_write(regs, DEIIR, de_iir);
		if (INTEL_GEN(i915) >= 7)
			ivb_display_irq_handler(i915, de_iir);
2189
		else
2190 2191
			ilk_display_irq_handler(i915, de_iir);
		ret = IRQ_HANDLED;
2192 2193
	}

2194 2195
	if (INTEL_GEN(i915) >= 6) {
		u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
2196
		if (pm_iir) {
2197 2198
			raw_reg_write(regs, GEN6_PMIIR, pm_iir);
			gen6_rps_irq_handler(&i915->gt.rps, pm_iir);
2199 2200
			ret = IRQ_HANDLED;
		}
2201
	}
2202

2203 2204 2205
	raw_reg_write(regs, DEIER, de_ier);
	if (sde_ier)
		raw_reg_write(regs, SDEIER, sde_ier);
2206

2207 2208
	pmu_irq_stats(i915, ret);

2209
	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2210
	enable_rpm_wakeref_asserts(&i915->runtime_pm);
2211

2212 2213 2214
	return ret;
}

2215
static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2216
				u32 hotplug_trigger)
2217
{
2218
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2219

2220 2221
	dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
2222

2223 2224 2225
	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
			   hotplug_trigger, dig_hotplug_reg,
			   dev_priv->hotplug.hpd,
2226
			   bxt_port_hotplug_long_detect);
2227

2228
	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2229 2230
}

2231 2232 2233
static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
	u32 pin_mask = 0, long_mask = 0;
2234 2235
	u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
	u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
2236 2237

	if (trigger_tc) {
2238 2239
		u32 dig_hotplug_reg;

2240 2241
		dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL);
		intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
2242

2243 2244 2245
		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   trigger_tc, dig_hotplug_reg,
				   dev_priv->hotplug.hpd,
2246
				   gen11_port_hotplug_long_detect);
2247 2248 2249 2250 2251
	}

	if (trigger_tbt) {
		u32 dig_hotplug_reg;

2252 2253
		dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL);
		intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
2254

2255 2256 2257
		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   trigger_tbt, dig_hotplug_reg,
				   dev_priv->hotplug.hpd,
2258
				   gen11_port_hotplug_long_detect);
2259 2260 2261
	}

	if (pin_mask)
2262
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2263
	else
2264 2265
		drm_err(&dev_priv->drm,
			"Unexpected DE HPD interrupt 0x%08x\n", iir);
2266 2267
}

2268 2269
static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
{
2270
	u32 mask;
2271

2272 2273 2274
	if (INTEL_GEN(dev_priv) >= 12)
		return TGL_DE_PORT_AUX_DDIA |
			TGL_DE_PORT_AUX_DDIB |
2275 2276 2277 2278 2279 2280 2281 2282
			TGL_DE_PORT_AUX_DDIC |
			TGL_DE_PORT_AUX_USBC1 |
			TGL_DE_PORT_AUX_USBC2 |
			TGL_DE_PORT_AUX_USBC3 |
			TGL_DE_PORT_AUX_USBC4 |
			TGL_DE_PORT_AUX_USBC5 |
			TGL_DE_PORT_AUX_USBC6;

2283 2284

	mask = GEN8_AUX_CHANNEL_A;
2285 2286 2287 2288 2289
	if (INTEL_GEN(dev_priv) >= 9)
		mask |= GEN9_AUX_CHANNEL_B |
			GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;

2290
	if (IS_CNL_WITH_PORT_F(dev_priv) || IS_GEN(dev_priv, 11))
2291 2292
		mask |= CNL_AUX_CHANNEL_F;

2293 2294
	if (IS_GEN(dev_priv, 11))
		mask |= ICL_AUX_CHANNEL_E;
2295 2296 2297 2298

	return mask;
}

2299 2300
static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
{
2301
	if (HAS_D12_PLANE_MINIMIZATION(dev_priv))
2302 2303
		return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
	else if (INTEL_GEN(dev_priv) >= 11)
2304 2305
		return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
	else if (INTEL_GEN(dev_priv) >= 9)
2306 2307 2308 2309 2310
		return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
	else
		return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
}

2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321
static void
gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
	bool found = false;

	if (iir & GEN8_DE_MISC_GSE) {
		intel_opregion_asle_intr(dev_priv);
		found = true;
	}

	if (iir & GEN8_DE_EDP_PSR) {
2322
		struct intel_encoder *encoder;
2323 2324 2325
		u32 psr_iir;
		i915_reg_t iir_reg;

2326
		for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2327
			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2328

2329 2330 2331 2332 2333 2334 2335 2336 2337 2338
			if (INTEL_GEN(dev_priv) >= 12)
				iir_reg = TRANS_PSR_IIR(intel_dp->psr.transcoder);
			else
				iir_reg = EDP_PSR_IIR;

			psr_iir = intel_uncore_read(&dev_priv->uncore, iir_reg);
			intel_uncore_write(&dev_priv->uncore, iir_reg, psr_iir);

			if (psr_iir)
				found = true;
2339

2340
			intel_psr_irq_handler(intel_dp, psr_iir);
2341

2342 2343 2344 2345
			/* prior to GEN12 there is only one EDP PSR */
			if (INTEL_GEN(dev_priv) < 12)
				break;
		}
2346 2347 2348
	}

	if (!found)
2349
		drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
2350 2351
}

2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363
static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
					   u32 te_trigger)
{
	enum pipe pipe = INVALID_PIPE;
	enum transcoder dsi_trans;
	enum port port;
	u32 val, tmp;

	/*
	 * In case of dual link, TE comes from DSI_1;
	 * this is to check if dual link is enabled
	 */
2364
	val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375
	val &= PORT_SYNC_MODE_ENABLE;

	/*
	 * if dual link is enabled, then read DSI_0
	 * transcoder registers
	 */
	port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
						  PORT_A : PORT_B;
	dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;

	/* Check if DSI configured in command mode */
2376
	val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
2377 2378 2379 2380 2381 2382 2383 2384
	val = val & OP_MODE_MASK;

	if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
		drm_err(&dev_priv->drm, "DSI transcoder not configured in command mode\n");
		return;
	}

	/* Get PIPE for handling VBLANK event */
2385
	val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans));
2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404
	switch (val & TRANS_DDI_EDP_INPUT_MASK) {
	case TRANS_DDI_EDP_INPUT_A_ON:
		pipe = PIPE_A;
		break;
	case TRANS_DDI_EDP_INPUT_B_ONOFF:
		pipe = PIPE_B;
		break;
	case TRANS_DDI_EDP_INPUT_C_ONOFF:
		pipe = PIPE_C;
		break;
	default:
		drm_err(&dev_priv->drm, "Invalid PIPE\n");
		return;
	}

	intel_handle_vblank(dev_priv, pipe);

	/* clear TE in dsi IIR */
	port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
2405 2406
	tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port));
	intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp);
2407 2408
}

2409 2410 2411 2412 2413 2414 2415 2416
static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 9)
		return GEN9_PIPE_PLANE1_FLIP_DONE;
	else
		return GEN8_PIPE_PRIMARY_FLIP_DONE;
}

2417 2418
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2419 2420
{
	irqreturn_t ret = IRQ_NONE;
2421
	u32 iir;
2422
	enum pipe pipe;

2424
	if (master_ctl & GEN8_DE_MISC_IRQ) {
2425
		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
2426
		if (iir) {
2427
			intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
2428
			ret = IRQ_HANDLED;
2429 2430
			gen8_de_misc_irq_handler(dev_priv, iir);
		} else {
2431 2432
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE MISC)!\n");
2433
		}
2434 2435
	}

2436
	if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
2437
		iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
2438
		if (iir) {
2439
			intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
2440 2441 2442
			ret = IRQ_HANDLED;
			gen11_hpd_irq_handler(dev_priv, iir);
		} else {
2443 2444
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE HPD)!\n");
2445 2446 2447
		}
	}

2448
	if (master_ctl & GEN8_DE_PORT_IRQ) {
2449
		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
2450
		if (iir) {
2451
			bool found = false;
2452

2453
			intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);
2454
			ret = IRQ_HANDLED;

2456
			if (iir & gen8_de_port_aux_mask(dev_priv)) {
2457
				dp_aux_irq_handler(dev_priv);
2458 2459 2460
				found = true;
			}

2461
			if (IS_GEN9_LP(dev_priv)) {
				u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
2466 2467 2468
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2473 2474
					found = true;
				}
2475 2476
			}

2477
			if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
2478
				gmbus_irq_handler(dev_priv);
				found = true;
			}

2482
			if (INTEL_GEN(dev_priv) >= 11) {
				u32 te_trigger = iir & (DSI0_TE | DSI1_TE);

				if (te_trigger) {
					gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
2487 2488 2489 2490
					found = true;
				}
			}

2491
			if (!found)
2492 2493
				drm_err(&dev_priv->drm,
					"Unexpected DE Port interrupt\n");
2494
		}
2495
		else
2496 2497
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE PORT)!\n");
2498 2499
	}

2500
	for_each_pipe(dev_priv, pipe) {
2501
		u32 fault_errors;
2502

2503 2504
		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;
2505

2506
		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe));
2507
		if (!iir) {
2508 2509
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE PIPE)!\n");
2510 2511
			continue;
		}
2512

2513
		ret = IRQ_HANDLED;
2514
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir);
2515

2516
		if (iir & GEN8_PIPE_VBLANK)
2517
			intel_handle_vblank(dev_priv, pipe);
2518

2519
		if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
2520 2521
			flip_done_handler(dev_priv, pipe);

2522
		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2523
			hsw_pipe_crc_irq_handler(dev_priv, pipe);
2524

2525 2526
		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2527

2528
		fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
2529
		if (fault_errors)
2530 2531 2532 2533
			drm_err(&dev_priv->drm,
				"Fault errors on pipe %c: 0x%08x\n",
				pipe_name(pipe),
				fault_errors);
2534 2535
	}

2536
	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2537
	    master_ctl & GEN8_DE_PCH_IRQ) {
2538 2539 2540 2541 2542
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
2543
		iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2544
		if (iir) {
2545
			intel_uncore_write(&dev_priv->uncore, SDEIIR, iir);
2546
			ret = IRQ_HANDLED;
2547

2548 2549
			if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
				icp_irq_handler(dev_priv, iir);
2550
			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
2551
				spt_irq_handler(dev_priv, iir);
2552
			else
2553
				cpt_irq_handler(dev_priv, iir);
2554 2555 2556 2557 2558
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
2559 2560
			drm_dbg(&dev_priv->drm,
				"The master control interrupt lied (SDE)!\n");
2561
		}
2562 2563
	}

2564 2565 2566
	return ret;
}

2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584
static inline u32 gen8_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN8_MASTER_IRQ);
}

static inline void gen8_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
}

2585 2586
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
2587
	struct drm_i915_private *dev_priv = arg;
2588
	void __iomem * const regs = dev_priv->uncore.regs;
2589 2590 2591 2592 2593
	u32 master_ctl;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

2594 2595 2596
	master_ctl = gen8_master_intr_disable(regs);
	if (!master_ctl) {
		gen8_master_intr_enable(regs);
2597
		return IRQ_NONE;
2598
	}
2599

2600 2601
	/* Find, queue (onto bottom-halves), then clear each source */
	gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
2602 2603 2604

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & ~GEN8_GT_IRQS) {
2605
		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2606
		gen8_de_irq_handler(dev_priv, master_ctl);
2607
		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2608
	}
2609

2610
	gen8_master_intr_enable(regs);
2611

2612 2613
	pmu_irq_stats(dev_priv, IRQ_HANDLED);

2614
	return IRQ_HANDLED;
2615 2616
}

2617
static u32
2618
gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
2619
{
2620
	void __iomem * const regs = gt->uncore->regs;
2621
	u32 iir;
2622 2623

	if (!(master_ctl & GEN11_GU_MISC_IRQ))
2624 2625 2626 2627 2628
		return 0;

	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
	if (likely(iir))
		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
2629

2630
	return iir;
2631 2632 2633
}

static void
2634
gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
2635 2636
{
	if (iir & GEN11_GU_MISC_GSE)
2637
		intel_opregion_asle_intr(gt->i915);
2638 2639
}

2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657
static inline u32 gen11_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
}

static inline void gen11_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}

2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676
static void
gen11_display_irq_handler(struct drm_i915_private *i915)
{
	void __iomem * const regs = i915->uncore.regs;
	const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);

	disable_rpm_wakeref_asserts(&i915->runtime_pm);
	/*
	 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
	 * for the display related bits.
	 */
	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
	gen8_de_irq_handler(i915, disp_ctl);
	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
		      GEN11_DISPLAY_IRQ_ENABLE);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);
}

static __always_inline irqreturn_t
__gen11_irq_handler(struct drm_i915_private * const i915,
		    u32 (*intr_disable)(void __iomem * const regs),
		    void (*intr_enable)(void __iomem * const regs))
{
	void __iomem * const regs = i915->uncore.regs;
	struct intel_gt *gt = &i915->gt;
	u32 master_ctl;
	u32 gu_misc_iir;

	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_ctl = intr_disable(regs);
	if (!master_ctl) {
		intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen11_gt_irq_handler(gt, master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & GEN11_DISPLAY_IRQ)
		gen11_display_irq_handler(i915);

	gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);

	intr_enable(regs);

	gen11_gu_misc_irq_handler(gt, gu_misc_iir);

	pmu_irq_stats(i915, IRQ_HANDLED);

	return IRQ_HANDLED;
}

2714 2715 2716 2717 2718 2719 2720
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
	return __gen11_irq_handler(arg,
				   gen11_master_intr_disable,
				   gen11_master_intr_enable);
}

2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760
static u32 dg1_master_intr_disable_and_ack(void __iomem * const regs)
{
	u32 val;

	/* First disable interrupts */
	raw_reg_write(regs, DG1_MSTR_UNIT_INTR, 0);

	/* Get the indication levels and ack the master unit */
	val = raw_reg_read(regs, DG1_MSTR_UNIT_INTR);
	if (unlikely(!val))
		return 0;

	raw_reg_write(regs, DG1_MSTR_UNIT_INTR, val);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt and ack them right away - we keep GEN11_MASTER_IRQ
	 * out as this bit doesn't exist anymore for DG1
	 */
	val = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ) & ~GEN11_MASTER_IRQ;
	if (unlikely(!val))
		return 0;

	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, val);

	return val;
}

static inline void dg1_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, DG1_MSTR_UNIT_INTR, DG1_MSTR_IRQ);
}

static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
	return __gen11_irq_handler(arg,
				   dg1_master_intr_disable_and_ack,
				   dg1_master_intr_enable);
}

2761 2762 2763
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
2764
int i8xx_enable_vblank(struct drm_crtc *crtc)
2765
{
2766 2767
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2768
	unsigned long irqflags;
2769

2770
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2771
	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2772
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2773

2774 2775 2776
	return 0;
}

2777
int i915gm_enable_vblank(struct drm_crtc *crtc)
2778
{
2779
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2780

2781 2782 2783 2784 2785 2786 2787
	/*
	 * Vblank interrupts fail to wake the device up from C2+.
	 * Disabling render clock gating during C-states avoids
	 * the problem. There is a small power cost so we do this
	 * only when vblank interrupts are actually enabled.
	 */
	if (dev_priv->vblank_enabled++ == 0)
2788
		intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2789

2790
	return i8xx_enable_vblank(crtc);
2791 2792
}

2793
int i965_enable_vblank(struct drm_crtc *crtc)
2794
{
2795 2796
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2797 2798 2799
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2800 2801
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
2802 2803 2804 2805 2806
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

int ilk_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* Even though there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated.
	 */
	if (HAS_PSR(dev_priv))
		drm_crtc_vblank_restore(crtc);

	return 0;
}

2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844
static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
				   bool enable)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	enum port port;
	u32 tmp;

	if (!(intel_crtc->mode_flags &
	    (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
		return false;

	/* for dual link cases we consider TE from slave */
	if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
		port = PORT_B;
	else
		port = PORT_A;

2845
	tmp =  intel_uncore_read(&dev_priv->uncore, DSI_INTR_MASK_REG(port));
2846 2847 2848 2849 2850
	if (enable)
		tmp &= ~DSI_TE_EVENT;
	else
		tmp |= DSI_TE_EVENT;

2851
	intel_uncore_write(&dev_priv->uncore, DSI_INTR_MASK_REG(port), tmp);
2852

2853 2854
	tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port));
	intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp);
2855 2856 2857 2858

	return true;
}

2859
int bdw_enable_vblank(struct drm_crtc *crtc)
2860
{
2861
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2862 2863
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
2864 2865
	unsigned long irqflags;

2866 2867 2868
	if (gen11_dsi_configure_te(intel_crtc, true))
		return 0;

2869
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2870
	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2871
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2872

2873 2874 2875 2876
	/* Even if there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated, so check only for PSR.
	 */
	if (HAS_PSR(dev_priv))
2877
		drm_crtc_vblank_restore(crtc);
2878

2879 2880 2881
	return 0;
}

2882 2883 2884
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
2885
void i8xx_disable_vblank(struct drm_crtc *crtc)
2886
{
2887 2888
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2889
	unsigned long irqflags;
2890

2891
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2892
	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2893 2894 2895
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

2896
void i915gm_disable_vblank(struct drm_crtc *crtc)
2897
{
2898
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2899

2900
	i8xx_disable_vblank(crtc);
2901

2902
	if (--dev_priv->vblank_enabled == 0)
2903
		intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2904 2905
}

2906
void i965_disable_vblank(struct drm_crtc *crtc)
2907
{
2908 2909
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2910 2911 2912
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2913 2914
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2915 2916 2917
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

void ilk_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

2931
void bdw_disable_vblank(struct drm_crtc *crtc)
2932
{
2933
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2934 2935
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
2936 2937
	unsigned long irqflags;

2938 2939 2940
	if (gen11_dsi_configure_te(intel_crtc, false))
		return;

2941
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2942
	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2943 2944 2945
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (HAS_PCH_NOP(dev_priv))
		return;

	GEN3_IRQ_RESET(uncore, SDE);

	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
}

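/*
 * Reset all VLV/CHV display interrupt state: DPINVGTT status, hotplug
 * enables and status, pipestats, and the VLV_ IMR/IER/IIR triplet.
 */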
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (IS_CHERRYVIEW(dev_priv))
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);

	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
	intel_uncore_write(uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, VLV_);
	dev_priv->irq_mask = ~0u;
}

2977 2978
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
2979 2980
	struct intel_uncore *uncore = &dev_priv->uncore;

2981
	u32 pipestat_mask;
2982
	u32 enable_mask;
2983 2984
	enum pipe pipe;

2985
	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
2986 2987 2988 2989 2990

	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

2991 2992
	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2993 2994 2995 2996
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_LPE_PIPE_A_INTERRUPT |
		I915_LPE_PIPE_B_INTERRUPT;

2997
	if (IS_CHERRYVIEW(dev_priv))
2998 2999
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
			I915_LPE_PIPE_C_INTERRUPT;
3000

3001
	drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
3002

3003 3004
	dev_priv->irq_mask = ~enable_mask;

3005
	GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
3006 3007 3008 3009
}

/* drm_dma.h hooks
*/
3010
static void ilk_irq_reset(struct drm_i915_private *dev_priv)
3011
{
3012
	struct intel_uncore *uncore = &dev_priv->uncore;
3013

3014
	GEN3_IRQ_RESET(uncore, DE);
3015 3016
	dev_priv->irq_mask = ~0u;

3017
	if (IS_GEN(dev_priv, 7))
3018
		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
3019

3020
	if (IS_HASWELL(dev_priv)) {
3021 3022
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3023 3024
	}

3025
	gen5_gt_irq_reset(&dev_priv->gt);
3026

3027
	ibx_irq_reset(dev_priv);
3028 3029
}

static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);

	gen5_gt_irq_reset(&dev_priv->gt);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059 3060
static void cnp_display_clock_wa(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	/*
	 * Wa_14010685332:cnp/cmp,tgp,adp
	 * TODO: Clarify which platforms this applies to
	 * TODO: Figure out if this workaround can be applied in the s0ix suspend/resume handlers as
	 * on earlier platforms and whether the workaround is also needed for runtime suspend/resume
	 */
	if (INTEL_PCH_TYPE(dev_priv) == PCH_CNP ||
	    (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && INTEL_PCH_TYPE(dev_priv) < PCH_DG1)) {
		intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS,
				 SBCLK_RUN_REFCLK_DIS);
		intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
	}
}

3061
static void gen8_irq_reset(struct drm_i915_private *dev_priv)
3062
{
3063
	struct intel_uncore *uncore = &dev_priv->uncore;
3064
	enum pipe pipe;
3065

3066
	gen8_master_intr_disable(dev_priv->uncore.regs);
3067

3068
	gen8_gt_irq_reset(&dev_priv->gt);
3069

3070 3071
	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3072

3073
	for_each_pipe(dev_priv, pipe)
3074 3075
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
3076
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3077

3078 3079 3080
	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3081

3082
	if (HAS_PCH_SPLIT(dev_priv))
3083
		ibx_irq_reset(dev_priv);
3084 3085

	cnp_display_clock_wa(dev_priv);
3086
}
3087

3088
static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
{
3090
	struct intel_uncore *uncore = &dev_priv->uncore;
3091
	enum pipe pipe;
3092 3093
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);

3095
	intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);

3097 3098 3099
	if (INTEL_GEN(dev_priv) >= 12) {
		enum transcoder trans;

3100
		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
			intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
		}
	} else {
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}
3114

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
3118
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
M
3120 3121 3122
	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
	GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
3123

3124
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3125
		GEN3_IRQ_RESET(uncore, SDE);

3127
	cnp_display_clock_wa(dev_priv);
}

3130 3131 3132 3133
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

3134 3135 3136 3137
	if (HAS_MASTER_UNIT_IRQ(dev_priv))
		dg1_master_intr_disable_and_ack(dev_priv->uncore.regs);
	else
		gen11_master_intr_disable(dev_priv->uncore.regs);
3138 3139 3140 3141 3142 3143 3144 3145

	gen11_gt_irq_reset(&dev_priv->gt);
	gen11_display_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
}

3146
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3147
				     u8 pipe_mask)
3148
{
3149
	struct intel_uncore *uncore = &dev_priv->uncore;
3150 3151
	u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
		gen8_de_pipe_flip_done_mask(dev_priv);
3152
	enum pipe pipe;
3153

3154
	spin_lock_irq(&dev_priv->irq_lock);
3155 3156 3157 3158 3159 3160

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

3161
	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3162
		GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
3163 3164
				  dev_priv->de_irq_mask[pipe],
				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
3165

3166
	spin_unlock_irq(&dev_priv->irq_lock);
3167 3168
}

3169
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3170
				     u8 pipe_mask)
3171
{
3172
	struct intel_uncore *uncore = &dev_priv->uncore;
3173 3174
	enum pipe pipe;

3175
	spin_lock_irq(&dev_priv->irq_lock);
3176 3177 3178 3179 3180 3181

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

3182
	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3183
		GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3184

3185 3186 3187
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
3188
	intel_synchronize_irq(dev_priv);
3189 3190
}

3191
static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
3192
{
3193
	struct intel_uncore *uncore = &dev_priv->uncore;
3194

3195 3196
	intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
3197

3198
	gen8_gt_irq_reset(&dev_priv->gt);
3199

3200
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3201

3202
	spin_lock_irq(&dev_priv->irq_lock);
3203 3204
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
3205
	spin_unlock_irq(&dev_priv->irq_lock);
3206 3207
}

3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232
static u32 ibx_hotplug_enables(struct drm_i915_private *i915,
			       enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_A:
		/*
		 * When CPU and PCH are on the same package, port A
		 * HPD must be enabled in both north and south.
		 */
		return HAS_PCH_LPT_LP(i915) ?
			PORTA_HOTPLUG_ENABLE : 0;
	case HPD_PORT_B:
		return PORTB_HOTPLUG_ENABLE |
			PORTB_PULSE_DURATION_2ms;
	case HPD_PORT_C:
		return PORTC_HOTPLUG_ENABLE |
			PORTC_PULSE_DURATION_2ms;
	case HPD_PORT_D:
		return PORTD_HOTPLUG_ENABLE |
			PORTD_PULSE_DURATION_2ms;
	default:
		return 0;
	}
}

3233
static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
3234
{
3235
	u32 hotplug;
3236 3237 3238

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
3239 3240
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on LPT+.
3241
	 */
3242
	hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
3243 3244 3245 3246 3247
	hotplug &= ~(PORTA_HOTPLUG_ENABLE |
		     PORTB_HOTPLUG_ENABLE |
		     PORTC_HOTPLUG_ENABLE |
		     PORTD_HOTPLUG_ENABLE |
		     PORTB_PULSE_DURATION_MASK |
3248 3249
		     PORTC_PULSE_DURATION_MASK |
		     PORTD_PULSE_DURATION_MASK);
3250
	hotplug |= intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables);
3251
	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
3252
}

3254 3255 3256 3257
static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

3258
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3259
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3260 3261 3262 3263 3264 3265

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	ibx_hpd_detection_setup(dev_priv);
}

3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296
static u32 icp_ddi_hotplug_enables(struct drm_i915_private *i915,
				   enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_A:
	case HPD_PORT_B:
	case HPD_PORT_C:
	case HPD_PORT_D:
		return SHOTPLUG_CTL_DDI_HPD_ENABLE(pin);
	default:
		return 0;
	}
}

static u32 icp_tc_hotplug_enables(struct drm_i915_private *i915,
				  enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
		return ICP_TC_HPD_ENABLE(pin);
	default:
		return 0;
	}
}

static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv)
3297 3298 3299
{
	u32 hotplug;

3300
	hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI);
3301 3302 3303 3304 3305
	hotplug &= ~(SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) |
		     SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) |
		     SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) |
		     SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D));
	hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables);
3306
	intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, hotplug);
3307
}
3308

3309
static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
3310 3311 3312
{
	u32 hotplug;

3313
	hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC);
3314 3315 3316 3317 3318 3319 3320
	hotplug &= ~(ICP_TC_HPD_ENABLE(HPD_PORT_TC1) |
		     ICP_TC_HPD_ENABLE(HPD_PORT_TC2) |
		     ICP_TC_HPD_ENABLE(HPD_PORT_TC3) |
		     ICP_TC_HPD_ENABLE(HPD_PORT_TC4) |
		     ICP_TC_HPD_ENABLE(HPD_PORT_TC5) |
		     ICP_TC_HPD_ENABLE(HPD_PORT_TC6));
	hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables);
3321
	intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, hotplug);
3322 3323
}

3324
static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
3325 3326 3327
{
	u32 hotplug_irqs, enabled_irqs;

3328
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3329
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3330

3331
	if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
3332
		intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3333

3334 3335
	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

3336 3337
	icp_ddi_hpd_detection_setup(dev_priv);
	icp_tc_hpd_detection_setup(dev_priv);
3338 3339
}

static u32 gen11_hotplug_enables(struct drm_i915_private *i915,
				 enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
		return GEN11_HOTPLUG_CTL_ENABLE(pin);
	default:
		return 0;
	}
}

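/*
 * DG1 reuses the ICP hotplug setup below, but first flips the DDI HPD
 * invert bits in SOUTH_CHICKEN1 before handing over to icp_hpd_irq_setup().
 */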
static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1);
	val |= (INVERT_DDIA_HPD |
		INVERT_DDIB_HPD |
		INVERT_DDIC_HPD |
		INVERT_DDID_HPD);
	intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val);

	icp_hpd_irq_setup(dev_priv);
}

static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL);
	hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
	hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
	intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, hotplug);
}

static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL);
	hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
	hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
	intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, hotplug);
}

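/*
 * gen11_hpd_irq_setup() updates GEN11_DE_HPD_IMR directly: clearing
 * hotplug_irqs unmasks every hotplug pin this platform has, and the
 * "~enabled_irqs & hotplug_irqs" term then re-masks the pins that are not
 * currently enabled, so only the requested subset can raise interrupts.
 */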
static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;
	u32 val;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);

	val = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
	val &= ~hotplug_irqs;
	val |= ~enabled_irqs & hotplug_irqs;
	intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IMR, val);
	intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);

	gen11_tc_hpd_detection_setup(dev_priv);
	gen11_tbt_hpd_detection_setup(dev_priv);

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_hpd_irq_setup(dev_priv);
}

static u32 spt_hotplug_enables(struct drm_i915_private *i915,
			       enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_A:
		return PORTA_HOTPLUG_ENABLE;
	case HPD_PORT_B:
		return PORTB_HOTPLUG_ENABLE;
	case HPD_PORT_C:
		return PORTC_HOTPLUG_ENABLE;
	case HPD_PORT_D:
		return PORTD_HOTPLUG_ENABLE;
	default:
		return 0;
	}
}

static u32 spt_hotplug2_enables(struct drm_i915_private *i915,
				enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_E:
		return PORTE_HOTPLUG_ENABLE;
	default:
		return 0;
	}
}

static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 val, hotplug;

	/* Display WA #1179 WaHardHangonHotPlug: cnp */
	if (HAS_PCH_CNP(dev_priv)) {
		val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1);
		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
		val |= CHASSIS_CLK_REQ_DURATION(0xf);
		intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val);
	}

	/* Enable digital hotplug on the PCH */
	hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTA_HOTPLUG_ENABLE |
		     PORTB_HOTPLUG_ENABLE |
		     PORTC_HOTPLUG_ENABLE |
		     PORTD_HOTPLUG_ENABLE);
	hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables);
	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);

	hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2);
	hotplug &= ~PORTE_HOTPLUG_ENABLE;
	hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables);
	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, hotplug);
}

static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	spt_hpd_detection_setup(dev_priv);
}

static u32 ilk_hotplug_enables(struct drm_i915_private *i915,
			       enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_A:
		return DIGITAL_PORTA_HOTPLUG_ENABLE |
			DIGITAL_PORTA_PULSE_DURATION_2ms;
	default:
		return 0;
	}
}

static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 * The pulse duration bits are reserved on HSW+.
	 */
	hotplug = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL);
	hotplug &= ~(DIGITAL_PORTA_HOTPLUG_ENABLE |
		     DIGITAL_PORTA_PULSE_DURATION_MASK);
	hotplug |= intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables);
	intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
}

static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);

	if (INTEL_GEN(dev_priv) >= 8)
		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
	else
		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);

	ilk_hpd_detection_setup(dev_priv);

	ibx_hpd_irq_setup(dev_priv);
}

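/*
 * On gen9 LP parts the VBT can mark individual ports as having inverted
 * HPD wiring; bxt_hotplug_enables() folds the matching BXT_DDI*_HPD_INVERT
 * bit into the enable mask for such ports.
 */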
static u32 bxt_hotplug_enables(struct drm_i915_private *i915,
			       enum hpd_pin pin)
{
	u32 hotplug;

	switch (pin) {
	case HPD_PORT_A:
		hotplug = PORTA_HOTPLUG_ENABLE;
		if (intel_bios_is_port_hpd_inverted(i915, PORT_A))
			hotplug |= BXT_DDIA_HPD_INVERT;
		return hotplug;
	case HPD_PORT_B:
		hotplug = PORTB_HOTPLUG_ENABLE;
		if (intel_bios_is_port_hpd_inverted(i915, PORT_B))
			hotplug |= BXT_DDIB_HPD_INVERT;
		return hotplug;
	case HPD_PORT_C:
		hotplug = PORTC_HOTPLUG_ENABLE;
		if (intel_bios_is_port_hpd_inverted(i915, PORT_C))
			hotplug |= BXT_DDIC_HPD_INVERT;
		return hotplug;
	default:
		return 0;
	}
}

static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTA_HOTPLUG_ENABLE |
		     PORTB_HOTPLUG_ENABLE |
		     PORTC_HOTPLUG_ENABLE |
		     BXT_DDIA_HPD_INVERT |
		     BXT_DDIB_HPD_INVERT |
		     BXT_DDIC_HPD_INVERT);
	hotplug |= intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables);
	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
}

static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);

	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);

	bxt_hpd_detection_setup(dev_priv);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * Note that we currently do this after installing the interrupt handler,
 * but before we enable the master interrupt. That should be sufficient
 * to avoid races with the irq handler, assuming we have MSI. Shared legacy
 * interrupts could still race.
 */
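/*
 * Concretely: GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff) below writes
 * ~mask to SDEIMR and 0xffffffff to SDEIER, so every PCH source stays
 * enabled in SDEIER and only the bits in `mask` are unmasked.
 */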
static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 mask;

	if (HAS_PCH_NOP(dev_priv))
		return;

	if (HAS_PCH_IBX(dev_priv))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
	else
		mask = SDE_GMBUS_CPT;

	GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
}

static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 display_mask, extra_mask;

	if (INTEL_GEN(dev_priv) >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_PLANE_FLIP_DONE(PLANE_A) |
			      DE_PLANE_FLIP_DONE(PLANE_B) |
			      DE_DP_A_HOTPLUG);
	}

	if (IS_HASWELL(dev_priv)) {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
		display_mask |= DE_EDP_PSR_INT_HSW;
	}

	if (IS_IRONLAKE_M(dev_priv))
		extra_mask |= DE_PCU_EVENT;

	dev_priv->irq_mask = ~display_mask;

	ibx_irq_postinstall(dev_priv);

	gen5_gt_irq_postinstall(&dev_priv->gt);

	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
		      display_mask | extra_mask);
}

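/*
 * vlv/chv can disable the display irq block at runtime (see
 * intel_irq_init()); the two helpers below toggle the display interrupt
 * state and are called with dev_priv->irq_lock held.
 */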
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv)) {
		vlv_display_irq_reset(dev_priv);
		vlv_display_irq_postinstall(dev_priv);
	}
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		vlv_display_irq_reset(dev_priv);
}

static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen5_gt_irq_postinstall(&dev_priv->gt);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
}

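/*
 * gen8_de_irq_postinstall() programs the display engine interrupt
 * registers: the per-pipe masks (skipping pipes whose power well is off),
 * the DE port and misc registers, and on gen11+ the dedicated hotplug
 * register block.
 */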
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
		GEN8_PIPE_CDCLK_CRC_DONE;
	u32 de_pipe_enables;
	u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
	enum pipe pipe;

	if (INTEL_GEN(dev_priv) <= 10)
		de_misc_masked |= GEN8_DE_MISC_GSE;

	if (IS_GEN9_LP(dev_priv))
		de_port_masked |= BXT_DE_PORT_GMBUS;

	if (INTEL_GEN(dev_priv) >= 11) {
		enum port port;

		if (intel_bios_is_dsi_present(dev_priv, &port))
			de_port_masked |= DSI0_TE | DSI1_TE;
	}

	de_pipe_enables = de_pipe_masked |
		GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
		gen8_de_pipe_flip_done_mask(dev_priv);

	de_port_enables = de_port_masked;
	if (IS_GEN9_LP(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;

	if (INTEL_GEN(dev_priv) >= 12) {
		enum transcoder trans;

		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
		}
	} else {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
	}

	for_each_pipe(dev_priv, pipe) {
		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;

		if (intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);
	}

	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);

	if (INTEL_GEN(dev_priv) >= 11) {
		u32 de_hpd_masked = 0;
		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
				     GEN11_DE_TBT_HOTPLUG_MASK;

		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
			      de_hpd_enables);
	}
}

static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 mask = SDE_GMBUS_ICP;

	GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
}

static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);
	else if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev_priv);

	gen8_gt_irq_postinstall(&dev_priv->gt);
	gen8_de_irq_postinstall(dev_priv);

	gen8_master_intr_enable(dev_priv->uncore.regs);
}

static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);

	gen11_gt_irq_postinstall(&dev_priv->gt);
	gen8_de_irq_postinstall(dev_priv);

	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);

	if (HAS_MASTER_UNIT_IRQ(dev_priv)) {
		dg1_master_intr_enable(uncore->regs);
		intel_uncore_posting_read(&dev_priv->uncore, DG1_MSTR_UNIT_INTR);
	} else {
		gen11_master_intr_enable(uncore->regs);
		intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
	}
}

static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen8_gt_irq_postinstall(&dev_priv->gt);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
}

static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i9xx_pipestat_irq_reset(dev_priv);

	GEN2_IRQ_RESET(uncore);
	dev_priv->irq_mask = ~0u;
}

static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u16 enable_mask;

	intel_uncore_write16(uncore,
			     EMR,
			     ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void i8xx_error_irq_ack(struct drm_i915_private *i915,
			       u16 *eir, u16 *eir_stuck)
{
	struct intel_uncore *uncore = &i915->uncore;
	u16 emr;

	*eir = intel_uncore_read16(uncore, EIR);

	if (*eir)
		intel_uncore_write16(uncore, EIR, *eir);

	*eir_stuck = intel_uncore_read16(uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_read16(uncore, EMR);
	intel_uncore_write16(uncore, EMR, 0xffff);
	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
}

static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u16 eir, u16 eir_stuck)
{
	DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
			eir_stuck);
}

static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	*eir = intel_uncore_read(&dev_priv->uncore, EIR);

	intel_uncore_write(&dev_priv->uncore, EIR, *eir);

	*eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_read(&dev_priv->uncore, EMR);
	intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
	intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
}

static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u32 eir, u32 eir_stuck)
{
	DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
			eir_stuck);
}

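/*
 * i8xx_irq_handler() is the top-level handler for gen2: it reads GEN2_IIR
 * once, acks the pipestat/error sources, writes the IIR back to clear it,
 * and then signals breadcrumbs and handles the error and pipe events.
 */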
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u16 eir = 0, eir_stuck = 0;
		u16 iir;

		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);

		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
	}

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
	dev_priv->irq_mask = ~0u;
}

static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	intel_uncore_write(&dev_priv->uncore, EMR, ~(I915_ERROR_PAGE_TABLE |
			  I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}

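/*
 * i915_irq_handler() follows the same shape as the gen2 handler above, but
 * with 32-bit registers and optional hotplug handling when the platform
 * has hotplug support wired up.
 */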
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
	dev_priv->irq_mask = ~0u;
}

static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;
	u32 error_mask;

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	intel_uncore_write(&dev_priv->uncore, EMR, error_mask);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}

static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/* Programming the CRT detection parameters tends
	   to generate a spurious hotplug event about three
	   seconds later.  So just do it once.
	*/
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);

		if (iir & I915_BSD_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->gt.engine[VCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, IRQ_HANDLED);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int i;

	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	/* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
	if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11)
		dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;

	if (!HAS_DISPLAY(dev_priv))
		return;

	intel_hpd_init_pins(dev_priv);

	intel_hpd_init_work(dev_priv);

	dev->vblank_disable_immediate = true;

	/* Most platforms treat the display irq block as an always-on
	 * power domain. vlv/chv can disable it at runtime and need
	 * special care to avoid writing any of the display block registers
	 * outside of the power domain. We defer setting up the display irqs
	 * in this case to the runtime pm.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	/* If we have MST support, we want to avoid doing short HPD IRQ storm
	 * detection, as short HPD storms will occur as a natural part of
	 * sideband messaging with MST.
	 * On older platforms however, IRQ storms can occur with both long and
	 * short pulses, as seen on some G4x systems.
	 */
	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
	if (HAS_GMCH(dev_priv)) {
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else {
		if (HAS_PCH_DG1(dev_priv))
			dev_priv->display.hpd_irq_setup = dg1_hpd_irq_setup;
		else if (INTEL_GEN(dev_priv) >= 11)
			dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
		else if (IS_GEN9_LP(dev_priv))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	}
}

/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}

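/*
 * intel_irq_handler() picks the top-level interrupt handler for the
 * platform; intel_irq_install() passes the result to request_irq().
 */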
static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			return cherryview_irq_handler;
		else if (IS_VALLEYVIEW(dev_priv))
			return valleyview_irq_handler;
		else if (IS_GEN(dev_priv, 4))
			return i965_irq_handler;
		else if (IS_GEN(dev_priv, 3))
			return i915_irq_handler;
		else
			return i8xx_irq_handler;
	} else {
		if (HAS_MASTER_UNIT_IRQ(dev_priv))
			return dg1_irq_handler;
		if (INTEL_GEN(dev_priv) >= 11)
			return gen11_irq_handler;
		else if (INTEL_GEN(dev_priv) >= 8)
			return gen8_irq_handler;
		else
			return ilk_irq_handler;
	}
}

static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_reset(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_reset(dev_priv);
		else if (IS_GEN(dev_priv, 4))
			i965_irq_reset(dev_priv);
		else if (IS_GEN(dev_priv, 3))
			i915_irq_reset(dev_priv);
		else
			i8xx_irq_reset(dev_priv);
	} else {
		if (INTEL_GEN(dev_priv) >= 11)
			gen11_irq_reset(dev_priv);
		else if (INTEL_GEN(dev_priv) >= 8)
			gen8_irq_reset(dev_priv);
		else
			ilk_irq_reset(dev_priv);
	}
}

static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_postinstall(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_postinstall(dev_priv);
		else if (IS_GEN(dev_priv, 4))
			i965_irq_postinstall(dev_priv);
		else if (IS_GEN(dev_priv, 3))
			i915_irq_postinstall(dev_priv);
		else
			i8xx_irq_postinstall(dev_priv);
	} else {
		if (INTEL_GEN(dev_priv) >= 11)
			gen11_irq_postinstall(dev_priv);
		else if (INTEL_GEN(dev_priv) >= 8)
			gen8_irq_postinstall(dev_priv);
		else
			ilk_irq_postinstall(dev_priv);
	}
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;
	int ret;

	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	dev_priv->drm.irq_enabled = true;

	intel_irq_reset(dev_priv);

	ret = request_irq(irq, intel_irq_handler(dev_priv),
			  IRQF_SHARED, DRIVER_NAME, dev_priv);
	if (ret < 0) {
		dev_priv->drm.irq_enabled = false;
		return ret;
	}

	intel_irq_postinstall(dev_priv);

	return ret;
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;

	/*
	 * FIXME we can get called twice during driver probe
	 * error handling as well as during driver remove due to
	 * intel_modeset_driver_remove() calling us out of sequence.
	 * Would be nice if it didn't do that...
	 */
	if (!dev_priv->drm.irq_enabled)
		return;

	dev_priv->drm.irq_enabled = false;

	intel_irq_reset(dev_priv);

	free_irq(irq, dev_priv);

	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	intel_irq_reset(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
	intel_synchronize_irq(dev_priv);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->runtime_pm.irqs_enabled = true;
	intel_irq_reset(dev_priv);
	intel_irq_postinstall(dev_priv);
}

bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	/*
	 * We only use drm_irq_uninstall() at unload and VT switch, so
	 * this is the only thing we need to check.
	 */
	return dev_priv->runtime_pm.irqs_enabled;
}

void intel_synchronize_irq(struct drm_i915_private *i915)
{
	synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
}