i915_irq.c 115.8 KB
Newer Older
D
Dave Airlie 已提交
1
/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
L
Linus Torvalds 已提交
2
 */
D
Dave Airlie 已提交
3
/*
L
Linus Torvalds 已提交
4 5
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
D
Dave Airlie 已提交
27
 */
L
Linus Torvalds 已提交
28

29 30
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

31
#include <linux/circ_buf.h>
32 33 34
#include <linux/slab.h>
#include <linux/sysrq.h>

35
#include <drm/drm_drv.h>
36 37
#include <drm/drm_irq.h>

38
#include "display/intel_display_types.h"
39 40 41 42 43
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"

44
#include "gt/intel_breadcrumbs.h"
45
#include "gt/intel_gt.h"
46
#include "gt/intel_gt_irq.h"
47
#include "gt/intel_gt_pm_irq.h"
48
#include "gt/intel_rps.h"
49

L
Linus Torvalds 已提交
50
#include "i915_drv.h"
51
#include "i915_irq.h"
C
Chris Wilson 已提交
52
#include "i915_trace.h"
53
#include "intel_pm.h"
L
Linus Torvalds 已提交
54

55 56 57 58 59 60 61 62
/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

63 64
typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);

65 66 67 68
static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

69 70 71 72
static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

73 74 75 76
static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

77
static const u32 hpd_ibx[HPD_NUM_PINS] = {
78 79 80 81
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
82
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG,
83 84
};

85
static const u32 hpd_cpt[HPD_NUM_PINS] = {
86
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
87
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
88 89
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
90
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
91 92
};

X
Xiong Zhang 已提交
93
static const u32 hpd_spt[HPD_NUM_PINS] = {
94
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
X
Xiong Zhang 已提交
95 96 97
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
98
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
X
Xiong Zhang 已提交
99 100
};

101
static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
102 103 104 105 106
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
107
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
108 109
};

110
static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
111 112 113 114 115
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
116
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
117 118
};

119
static const u32 hpd_status_i915[HPD_NUM_PINS] = {
120 121 122 123 124
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
125
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
126 127
};

128
static const u32 hpd_bxt[HPD_NUM_PINS] = {
129
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
130
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
131
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC,
132 133
};

134
static const u32 hpd_gen11[HPD_NUM_PINS] = {
135 136 137 138 139 140
	[HPD_PORT_TC1] = GEN11_TC_HOTPLUG(PORT_TC1) | GEN11_TBT_HOTPLUG(PORT_TC1),
	[HPD_PORT_TC2] = GEN11_TC_HOTPLUG(PORT_TC2) | GEN11_TBT_HOTPLUG(PORT_TC2),
	[HPD_PORT_TC3] = GEN11_TC_HOTPLUG(PORT_TC3) | GEN11_TBT_HOTPLUG(PORT_TC3),
	[HPD_PORT_TC4] = GEN11_TC_HOTPLUG(PORT_TC4) | GEN11_TBT_HOTPLUG(PORT_TC4),
	[HPD_PORT_TC5] = GEN11_TC_HOTPLUG(PORT_TC5) | GEN11_TBT_HOTPLUG(PORT_TC5),
	[HPD_PORT_TC6] = GEN11_TC_HOTPLUG(PORT_TC6) | GEN11_TBT_HOTPLUG(PORT_TC6),
141 142
};

143
static const u32 hpd_icp[HPD_NUM_PINS] = {
144 145 146
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(PORT_C),
147 148 149 150 151 152
	[HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
	[HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
	[HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
	[HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
	[HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(PORT_TC5),
	[HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(PORT_TC6),
153 154
};

155 156 157 158 159 160 161 162 163 164 165 166 167
/*
 * Select the hotplug-detect (HPD) pin tables for this platform.
 *
 * Picks the CPU-side table (hpd->hpd) based on display generation, and —
 * for PCH-split platforms with a functional south display — the PCH-side
 * table (hpd->pch_hpd) based on PCH type.
 */
static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
{
	struct i915_hotplug *hpd = &dev_priv->hotplug;

	/* GMCH platforms have no separate PCH table; pick status table and bail. */
	if (HAS_GMCH(dev_priv)) {
		if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv))
			hpd->hpd = hpd_status_g4x;
		else
			hpd->hpd = hpd_status_i915;
		return;
	}

	/* Newest-first: each table matches a display engine generation. */
	if (INTEL_GEN(dev_priv) >= 11)
		hpd->hpd = hpd_gen11;
	else if (IS_GEN9_LP(dev_priv))
		hpd->hpd = hpd_bxt;
	else if (INTEL_GEN(dev_priv) >= 8)
		hpd->hpd = hpd_bdw;
	else if (INTEL_GEN(dev_priv) >= 7)
		hpd->hpd = hpd_ivb;
	else
		hpd->hpd = hpd_ilk;

	/* No south display interrupts without a (functional) PCH. */
	if (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv))
		return;

	if (HAS_PCH_TGP(dev_priv) || HAS_PCH_JSP(dev_priv) ||
	    HAS_PCH_ICP(dev_priv) || HAS_PCH_MCC(dev_priv))
		hpd->pch_hpd = hpd_icp;
	else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
		hpd->pch_hpd = hpd_spt;
	else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
		hpd->pch_hpd = hpd_cpt;
	else if (HAS_PCH_IBX(dev_priv))
		hpd->pch_hpd = hpd_ibx;
	else
		MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
}

195 196 197 198 199 200 201 202
/* Forward a hardware vblank event for @pipe to the DRM core. */
static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	drm_crtc_handle_vblank(&crtc->base);
}

203 204
/*
 * Quiesce a gen3+ style IMR/IER/IIR interrupt register triplet:
 * mask everything, disable delivery, then drain any latched events.
 */
void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
		    i915_reg_t iir, i915_reg_t ier)
{
	/* Mask all interrupts; posting read flushes the write to hardware. */
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	intel_uncore_write(uncore, ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}

218
/*
 * Quiesce the gen2 16-bit IMR/IER/IIR registers: mask everything,
 * disable delivery, then drain any latched events.
 */
void gen2_irq_reset(struct intel_uncore *uncore)
{
	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IMR);

	intel_uncore_write16(uncore, GEN2_IER, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

232 233 234
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
/* Warn if IIR still has events pending at install time, and drain it. */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(reg), val);
	/* Double write + posting read: IIR can queue up two events. */
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}
250

251
/* 16-bit gen2 variant of gen3_assert_iir_is_zero(): warn and drain GEN2_IIR. */
static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(GEN2_IIR), val);
	/* Double write + posting read: IIR can queue up two events. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

267 268 269 270
/*
 * Arm a gen3+ IMR/IER/IIR triplet: verify IIR is clean, then program
 * IER before unmasking via IMR so no event is delivered half-configured.
 */
void gen3_irq_init(struct intel_uncore *uncore,
		   i915_reg_t imr, u32 imr_val,
		   i915_reg_t ier, u32 ier_val,
		   i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}

279 280
/* Arm the gen2 16-bit IMR/IER pair; see gen3_irq_init() for ordering. */
void gen2_irq_init(struct intel_uncore *uncore,
		   u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}

289 290 291
/* For display hotplug interrupt */
/*
 * Read-modify-write PORT_HOTPLUG_EN under irq_lock: clear @mask bits,
 * then set @bits (which must be a subset of @mask). Caller holds the lock.
 */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     u32 mask,
				     u32 bits)
{
	u32 val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid that read-modify-write cycles
 * interfere, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

327 328 329 330 331 332
/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * For each bit in @interrupt_mask, the corresponding DEIMR bit is
 * unmasked iff the bit is also set in @enabled_irq_mask. Bits outside
 * @interrupt_mask are left untouched. Caller must hold irq_lock.
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    u32 interrupt_mask,
			    u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	/* IMR semantics are inverted: a set bit masks the interrupt. */
	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	/* Only touch the hardware when the cached mask actually changes. */
	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

357
/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Same masked read-modify-write scheme as ilk_update_display_irq(),
 * but GEN8_DE_PORT_IMR has no cached copy, so the old value is read
 * back from the hardware. Caller must hold irq_lock.
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;
	u32 old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	/* IMR semantics are inverted: a set bit masks the interrupt. */
	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

389 390 391 392 393 394 395 396 397
/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Masked read-modify-write of the per-pipe GEN8_DE_PIPE_IMR, using the
 * cached de_irq_mask[] copy. Caller must hold irq_lock.
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 u32 interrupt_mask,
			 u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	/* IMR semantics are inverted: a set bit masks the interrupt. */
	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

421 422 423 424 425 426
/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Masked read-modify-write of the south display (PCH) interrupt mask.
 * Caller must hold irq_lock.
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  u32 interrupt_mask,
				  u32 enabled_irq_mask)
{
	/* IMR semantics are inverted: a set bit masks the interrupt. */
	u32 sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
445

446 447
/*
 * Compute the PIPESTAT enable bits (upper 16) corresponding to the
 * currently cached status bits (lower 16) for @pipe. On VLV/CHV some
 * enable bits do not line up 1:1 with their status bits and are fixed
 * up explicitly. Caller must hold irq_lock.
 */
u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	/* By default each enable bit sits 16 above its status bit. */
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (INTEL_GEN(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	/* These enable bits don't mirror their status bits; set them by hand. */
	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	drm_WARN_ONCE(&dev_priv->drm,
		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}

490 491
/*
 * Enable the PIPESTAT events in @status_mask for @pipe, updating the
 * cached mask and programming the register. No-op if the requested
 * bits are already enabled. Caller must hold irq_lock.
 */
void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	/* Writing the status bits back also acks any pending events. */
	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

513 514
/*
 * Disable the PIPESTAT events in @status_mask for @pipe, updating the
 * cached mask and programming the register. No-op if none of the
 * requested bits are enabled. Caller must hold irq_lock.
 */
void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	/* Writing the status bits back also acks any pending events. */
	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

536 537 538 539 540 541 542 543
static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle)
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

544
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 *
 * Enables the legacy backlight (BLC) event on pipe B — and, on gen4+,
 * pipe A as well — so OpRegion ASLE requests are serviced. No-op when
 * the platform has no usable ASLE support.
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!i915_has_asle(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612
/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

613 614 615
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
/*
 * Cook up a 24-bit vblank counter from the gen3/4 frame counter
 * (PIPEFRAME high bits + PIPEFRAMEPIXEL low bits/pixel count). The
 * hardware counter increments at start of active, so the pixel count
 * is compared against vblank start to align with vblank semantics.
 */
u32 i915_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	unsigned long irqflags;

	/*
	 * On i965gm TV output the frame counter only works up to
	 * the point when we enable the TV encoder. After that the
	 * frame counter ceases to work and reads zero. We need a
	 * vblank wait before enabling the TV encoder and so we
	 * have to enable vblank interrupts while the frame counter
	 * is still in a working state. However the core vblank code
	 * does not like us returning non-zero frame counter values
	 * when we've told it that we don't have a working frame
	 * counter. Thus we must stop non-zero values leaking out.
	 */
	if (!vblank->max_vblank_count)
		return 0;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = intel_de_read_fw(dev_priv, low_frame);
		high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

682
/* g4x+ has a dedicated hardware frame counter register; just read it. */
u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721
/*
 * On certain encoders on certain platforms, pipe
 * scanline register will not work to get the scanline,
 * since the timings are driven from the PORT or issues
 * with scanline register updates.
 * This function will use Framestamp and current
 * timestamp registers to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads. We make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);

		scan_post_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	/* Time since vblank start, scaled by pixel clock, gives lines elapsed. */
	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
					clock), 1000 * htotal);
	scanline = min(scanline, vtotal - 1);
	/* Convert from "lines since vblank start" to an absolute scanline. */
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}

743 744 745 746
/*
 * intel_de_read_fw(), only for fast reads of display block, no need for
 * forcewake etc.
 */
/*
 * Return the current scanline for @crtc (adjusted by scanline_offset),
 * or -1 if the crtc is inactive. Caller holds uncore.lock so the raw
 * register reads below stay timing-critical-safe.
 */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return -1;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	/* Some encoders need the timestamp-based fallback path. */
	if (crtc->mode_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN(dev_priv, 2))
		position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank.  So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squak at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem.  We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

806 807 808 809 810
/*
 * Sample the current scanout position (and optional system timestamps)
 * for @_crtc. Returns true on success with *vpos/*hpos set relative to
 * vblank end: negative while inside vblank, positive afterwards.
 * DRM vblank-helper callback signature.
 */
static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
				     bool in_vblank_irq,
				     int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_device *dev = _crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	enum pipe pipe = crtc->pipe;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;
	/* gen5+, g4x and gen2 (or modes flagged so) use the scanline counter;
	 * gen3/4 otherwise use the pixel counter. */
	bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
		IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
		crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;

	if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
		drm_dbg(&dev_priv->drm,
			"trying to get scanoutpos for disabled "
			"pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (use_scanline_counter) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (use_scanline_counter) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}

926 927 928 929 930
/*
 * intel_crtc_get_vblank_timestamp - DRM vblank-timestamp hook for i915 CRTCs.
 *
 * Thin wrapper that delegates to the DRM helper, supplying
 * i915_get_crtc_scanoutpos() as the scanout-position callback.
 * Returns true when a usable timestamp was produced.
 */
bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
				     ktime_t *vblank_time, bool in_vblank_irq)
{
	return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
		crtc, max_error, vblank_time, in_vblank_irq,
		i915_get_crtc_scanoutpos);
}

934 935
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
936
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
937 938 939 940 941 942 943 944 945 946
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

947
/**
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	struct intel_gt *gt = &dev_priv->gt;
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
		goto out;

	/* Disable DOP clock gating for the duration of the L3 register access. */
	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	/* Drain every slice that the interrupt handler flagged as faulted. */
	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		/* ffs() is 1-based; convert to a 0-based slice index. */
		slice--;
		if (drm_WARN_ON_ONCE(&dev_priv->drm,
				     slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		/* Decode the faulting row/bank/subbank from the error register. */
		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		/* Ack the error and re-arm detection for this slice. */
		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		/* Notify userspace via a uevent so it can remap the bad row. */
		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	/* Restore the original DOP clock gating configuration. */
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
	/* Re-enable the parity interrupt now that all slices are handled. */
	spin_lock_irq(&gt->irq_lock);
	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&gt->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

1030
/* GEN11: was the hotplug pulse on @pin a long pulse per the HPD ctl @val? */
static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	if (pin == HPD_PORT_TC1)
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
	if (pin == HPD_PORT_TC2)
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
	if (pin == HPD_PORT_TC3)
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
	if (pin == HPD_PORT_TC4)
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
	if (pin == HPD_PORT_TC5)
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC5);
	if (pin == HPD_PORT_TC6)
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC6);

	return false;
}

1050
/* BXT: was the hotplug pulse on @pin a long pulse per the HPD status @val? */
static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	if (pin == HPD_PORT_A)
		return val & PORTA_HOTPLUG_LONG_DETECT;
	if (pin == HPD_PORT_B)
		return val & PORTB_HOTPLUG_LONG_DETECT;
	if (pin == HPD_PORT_C)
		return val & PORTC_HOTPLUG_LONG_DETECT;

	return false;
}

1064
/* ICP DDI ports: was the hotplug pulse on @pin a long pulse? */
static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	if (pin == HPD_PORT_A)
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_A);
	if (pin == HPD_PORT_B)
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_B);
	if (pin == HPD_PORT_C)
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_C);

	return false;
}

1078
/* ICP Type-C ports: was the hotplug pulse on @pin a long pulse? */
static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	if (pin == HPD_PORT_TC1)
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
	if (pin == HPD_PORT_TC2)
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
	if (pin == HPD_PORT_TC3)
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
	if (pin == HPD_PORT_TC4)
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
	if (pin == HPD_PORT_TC5)
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC5);
	if (pin == HPD_PORT_TC6)
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC6);

	return false;
}

1098
/* SPT PCH_PORT_HOTPLUG2: only port E is reported in this register. */
static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
	return pin == HPD_PORT_E && (val & PORTE_HOTPLUG_LONG_DETECT);
}

1108
/* SPT PCH_PORT_HOTPLUG: was the hotplug pulse on @pin a long pulse? */
static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	if (pin == HPD_PORT_A)
		return val & PORTA_HOTPLUG_LONG_DETECT;
	if (pin == HPD_PORT_B)
		return val & PORTB_HOTPLUG_LONG_DETECT;
	if (pin == HPD_PORT_C)
		return val & PORTC_HOTPLUG_LONG_DETECT;
	if (pin == HPD_PORT_D)
		return val & PORTD_HOTPLUG_LONG_DETECT;

	return false;
}

1124
/* ILK: only digital port A reports long-pulse status here. */
static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	return pin == HPD_PORT_A && (val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT);
}

1134
/* PCH (IBX/CPT): was the hotplug pulse on @pin a long pulse? */
static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	if (pin == HPD_PORT_B)
		return val & PORTB_HOTPLUG_LONG_DETECT;
	if (pin == HPD_PORT_C)
		return val & PORTC_HOTPLUG_LONG_DETECT;
	if (pin == HPD_PORT_D)
		return val & PORTD_HOTPLUG_LONG_DETECT;

	return false;
}

1148
/* i9xx: was the hotplug pulse on @pin a long pulse per PORT_HOTPLUG_STAT? */
static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	if (pin == HPD_PORT_B)
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	if (pin == HPD_PORT_C)
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	if (pin == HPD_PORT_D)
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;

	return false;
}

1162 1163 1164 1165 1166 1167 1168
/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
1169 1170 1171 1172
static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
			       u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
1173
			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
1174
{
1175
	enum hpd_pin pin;
1176

1177 1178
	BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);

1179 1180
	for_each_hpd_pin(pin) {
		if ((hpd[pin] & hotplug_trigger) == 0)
1181
			continue;
1182

1183
		*pin_mask |= BIT(pin);
1184

1185
		if (long_pulse_detect(pin, dig_hotplug_reg))
1186
			*long_mask |= BIT(pin);
1187 1188
	}

1189 1190 1191
	drm_dbg(&dev_priv->drm,
		"hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
		hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
1192 1193 1194

}

1195
/* GMBUS event interrupt: wake anyone sleeping on the GMBUS wait queue. */
static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

1200
/*
 * DP AUX done interrupt.
 *
 * NOTE(review): this deliberately wakes the same gmbus_wait_queue as
 * gmbus_irq_handler() — the queue appears to be shared between GMBUS and
 * DP AUX completion waiters, who re-check their own condition; confirm
 * against the AUX transfer code before relying on this.
 */
static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

1205
#if defined(CONFIG_DEBUG_FS)
/*
 * Record one set of pipe CRC results (up to five channels) for debugfs
 * consumers, skipping the initial unreliable reads after CRCs are enabled.
 */
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

	trace_intel_pipe_crc(crtc, crcs);

	/* pipe_crc->skipped is shared with the enable/disable path. */
	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	/* Stamp the entry with an accurate vblank count for correlation. */
	drm_crtc_add_crc_entry(&crtc->base, true,
				drm_crtc_accurate_vblank_count(&crtc->base),
				crcs);
}
#else
/* CRC capture is a debugfs-only feature; compile to a no-op without it. */
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
#endif

1248

1249 1250
static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
D
Daniel Vetter 已提交
1251
{
1252
	display_pipe_crc_irq_handler(dev_priv, pipe,
1253 1254
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
D
Daniel Vetter 已提交
1255 1256
}

1257 1258
static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
1259
{
1260
	display_pipe_crc_irq_handler(dev_priv, pipe,
1261 1262 1263 1264 1265
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1266
}
1267

1268 1269
static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
1270
{
1271
	u32 res1, res2;
1272

1273
	if (INTEL_GEN(dev_priv) >= 3)
1274 1275 1276 1277
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

1278
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1279 1280 1281
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;
1282

1283
	display_pipe_crc_irq_handler(dev_priv, pipe,
1284 1285 1286 1287
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
1288
}
1289

1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302
static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(PIPESTAT(pipe),
			   PIPESTAT_INT_STATUS_MASK |
			   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}

1303 1304
/*
 * Read and ack the per-pipe PIPESTAT status bits that correspond to the
 * pipe event bits set in @iir, storing the latched status in @pipe_stats
 * for the caller to dispatch. The register write ordering here is
 * deliberate and must not be changed (see comments below).
 */
static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	spin_lock(&dev_priv->irq_lock);

	/* Nothing to ack while display IRQs are disabled (e.g. runtime PM). */
	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		/* Map this pipe to its event bit in the IIR. */
		switch (pipe) {
		default:
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			status_mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(pipe);
		pipe_stats[pipe] = I915_READ(reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
		 */
		if (pipe_stats[pipe]) {
			I915_WRITE(reg, pipe_stats[pipe]);
			I915_WRITE(reg, enable_mask);
		}
	}
	spin_unlock(&dev_priv->irq_lock);
}

1369 1370 1371 1372 1373 1374 1375
/* Dispatch the per-pipe events latched by i9xx_pipestat_irq_ack() (gen2). */
static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		u32 status = pipe_stats[pipe];

		if (status & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (status & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (status & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}
}

/* Dispatch per-pipe events plus the one-shot backlight/ASLE notification. */
static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		u32 status = pipe_stats[pipe];

		if (status & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (status & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (status & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (status & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	/* ASLE is notified once, whether signalled per-pipe or via the IIR. */
	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);
}

/* i965/g4x variant: start-of-vblank status bit, plus GMBUS on pipe A. */
static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		u32 status = pipe_stats[pipe];

		if (status & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (status & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (status & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (status & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);

	/* GMBUS status only shows up in pipe A's PIPESTAT (index 0). */
	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

1437
static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1438 1439 1440
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;
1441

1442
	for_each_pipe(dev_priv, pipe) {
1443
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1444
			intel_handle_vblank(dev_priv, pipe);
1445 1446

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1447
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1448

1449 1450
		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1451 1452 1453
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1454
		gmbus_irq_handler(dev_priv);
1455 1456
}

1457
/*
 * Read and ack PORT_HOTPLUG_STAT, returning the accumulated hotplug/AUX
 * status bits. The clear-until-empty retry loop is required for correct
 * edge-triggered IIR behaviour (see comment below) — do not simplify it.
 */
static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = 0, hotplug_status_mask;
	int i;

	/* G4x/VLV/CHV additionally report DP AUX completion here. */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
	else
		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;

	/*
	 * We absolutely have to clear all the pending interrupt
	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
	 * interrupt bit won't have an edge, and the i965/g4x
	 * edge triggered IIR will not notice that an interrupt
	 * is still pending. We can't use PORT_HOTPLUG_EN to
	 * guarantee the edge as the act of toggling the enable
	 * bits can itself generate a new hotplug interrupt :(
	 */
	for (i = 0; i < 10; i++) {
		u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;

		if (tmp == 0)
			return hotplug_status;

		hotplug_status |= tmp;
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
	}

	/* Ten attempts should be plenty; scream if the hw keeps re-latching. */
	drm_WARN_ONCE(&dev_priv->drm, 1,
		      "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
		      I915_READ(PORT_HOTPLUG_STAT));

	return hotplug_status;
}

1495
static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1496 1497 1498
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;
1499
	u32 hotplug_trigger;
1500

1501 1502 1503 1504 1505
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
	else
		hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1506

1507 1508 1509 1510 1511
	if (hotplug_trigger) {
		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug_trigger, hotplug_trigger,
				   dev_priv->hotplug.hpd,
				   i9xx_port_hotplug_long_detect);
1512

1513
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1514
	}
1515 1516 1517 1518 1519

	if ((IS_G4X(dev_priv) ||
	     IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
		dp_aux_irq_handler(dev_priv);
1520 1521
}

1522
/*
 * Top-level interrupt handler for Valleyview. The master-enable /
 * IER / IIR write ordering below is derived from observed hardware
 * behaviour (see the in-body comment) and must be preserved exactly.
 */
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);
		iir = I915_READ(VLV_IIR);

		/* Nothing pending at all: not our interrupt. */
		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		I915_WRITE(VLV_MASTER_IER, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		/* Ack GT/PM interrupts now; handle them after re-enabling. */
		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		/* Restore interrupt enables before dispatching handlers. */
		I915_WRITE(VLV_IER, ier);
		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

		if (gt_iir)
			gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

1607 1608
/*
 * Top-level interrupt handler for Cherryview. Mirrors the Valleyview
 * handler, but GT interrupts arrive via the GEN8 master control register.
 * The master-enable / IER / IIR write ordering is empirically required
 * (see the in-body comment) and must be preserved exactly.
 */
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 master_ctl, iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		/* Nothing pending at all: not our interrupt. */
		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		I915_WRITE(GEN8_MASTER_IRQ, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		gen8_gt_irq_handler(&dev_priv->gt, master_ctl);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		/* Restore interrupt enables before dispatching handlers. */
		I915_WRITE(VLV_IER, ier);
		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

1684
/*
 * Handle PCH hotplug interrupts on IBX/CPT. Note that PCH_PORT_HOTPLUG
 * must be written even when @hotplug_trigger is zero (see comment below),
 * so the register touch happens unconditionally before the early return.
 */
static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger)
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/*
	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
	 * unless we touch the hotplug register, even if hotplug_trigger is
	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
	 * errors.
	 */
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	if (!hotplug_trigger) {
		/* Avoid acking status bits for a spurious (zero-trigger) pass. */
		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
			PORTD_HOTPLUG_STATUS_MASK |
			PORTC_HOTPLUG_STATUS_MASK |
			PORTB_HOTPLUG_STATUS_MASK;
		dig_hotplug_reg &= ~mask;
	}

	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
	if (!hotplug_trigger)
		return;

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
			   hotplug_trigger, dig_hotplug_reg,
			   dev_priv->hotplug.pch_hpd,
			   pch_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

1716
/*
 * Dispatch south-display (IBX PCH) interrupts: hotplug, DP AUX, GMBUS,
 * audio/HDCP debug events, FDI status and transcoder FIFO underruns.
 */
static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		/*
		 * port_name() yields a character ('A' + n); use %c like
		 * cpt_irq_handler() does, instead of %d which would print
		 * the character's ASCII code.
		 */
		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
			port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		drm_err(&dev_priv->drm, "PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK) {
		for_each_pipe(dev_priv, pipe)
			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
				pipe_name(pipe),
				I915_READ(FDI_RX_IIR(pipe)));
	}

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		drm_dbg(&dev_priv->drm,
			"PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
}

1766
/*
 * Handle GEN7_ERR_INT: poison, per-pipe FIFO underruns and CRC-done
 * events. The register is read once, dispatched, then written back in
 * full to ack everything that was observed.
 */
static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		drm_err(&dev_priv->drm, "Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			/* IVB has five CRC result regs; HSW+ only one. */
			if (IS_IVYBRIDGE(dev_priv))
				ivb_pipe_crc_irq_handler(dev_priv, pipe);
			else
				hsw_pipe_crc_irq_handler(dev_priv, pipe);
		}
	}

	/* Ack all handled error bits in one write. */
	I915_WRITE(GEN7_ERR_INT, err_int);
}

1789
static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
1790 1791
{
	u32 serr_int = I915_READ(SERR_INT);
1792
	enum pipe pipe;
1793

1794
	if (serr_int & SERR_INT_POISON)
1795
		drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1796

1797 1798 1799
	for_each_pipe(dev_priv, pipe)
		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
1800 1801

	I915_WRITE(SERR_INT, serr_int);
1802 1803
}

1804
/*
 * Dispatch south-display (CPT/PPT PCH) interrupts — the CPT layout of
 * ibx_irq_handler(): hotplug, DP AUX, GMBUS, audio content-protection
 * debug events, FDI status and the south error register.
 */
static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
			port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT) {
		for_each_pipe(dev_priv, pipe)
			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
				pipe_name(pipe),
				I915_READ(FDI_RX_IIR(pipe)));
	}

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev_priv);
}

1841
/*
 * Dispatch south-display interrupts for ICP-family PCHs (ICP/MCC/TGP/JSP).
 * The DDI and Type-C hotplug trigger masks differ per PCH variant, so they
 * are selected first, then each group is acked and decoded separately.
 */
static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 ddi_hotplug_trigger, tc_hotplug_trigger;
	u32 pin_mask = 0, long_mask = 0;

	if (HAS_PCH_TGP(dev_priv)) {
		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
		tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP;
	} else if (HAS_PCH_JSP(dev_priv)) {
		/* JSP exposes TGP-style DDI ports but no Type-C ports. */
		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
		tc_hotplug_trigger = 0;
	} else if (HAS_PCH_MCC(dev_priv)) {
		/* MCC has a single Type-C port (TC1). */
		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
		tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_ICP(PORT_TC1);
	} else {
		drm_WARN(&dev_priv->drm, !HAS_PCH_ICP(dev_priv),
			 "Unrecognized PCH type 0x%x\n",
			 INTEL_PCH_TYPE(dev_priv));

		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
		tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
	}

	if (ddi_hotplug_trigger) {
		u32 dig_hotplug_reg;

		/* Read-back then write acks the latched DDI hotplug bits. */
		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
		I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   ddi_hotplug_trigger, dig_hotplug_reg,
				   dev_priv->hotplug.pch_hpd,
				   icp_ddi_port_hotplug_long_detect);
	}

	if (tc_hotplug_trigger) {
		u32 dig_hotplug_reg;

		/* Same ack dance for the Type-C hotplug control register. */
		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
		I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   tc_hotplug_trigger, dig_hotplug_reg,
				   dev_priv->hotplug.pch_hpd,
				   icp_tc_port_hotplug_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_ICP)
		gmbus_irq_handler(dev_priv);
}

1895
/*
 * SPT/KBP PCH interrupt handler: port E hotplug lives in a separate
 * hotplug register (PCH_PORT_HOTPLUG2) from the other ports.
 */
static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 pin_mask = 0, long_mask = 0;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);

		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug_trigger, dig_hotplug_reg,
				   dev_priv->hotplug.pch_hpd,
				   spt_port_hotplug_long_detect);
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);

		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug2_trigger, dig_hotplug_reg,
				   dev_priv->hotplug.pch_hpd,
				   spt_port_hotplug2_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);
}

1933
static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
1934
				u32 hotplug_trigger)
1935 1936 1937 1938 1939 1940
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);

1941 1942 1943
	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
			   hotplug_trigger, dig_hotplug_reg,
			   dev_priv->hotplug.hpd,
1944 1945
			   ilk_port_hotplug_long_detect);

1946
	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1947 1948
}

1949 1950
/*
 * Ironlake/SNB display engine interrupt handler: hotplug, AUX, opregion,
 * per-pipe events, chained PCH interrupts and (gen5 only) PCU/RPS events.
 */
static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
	enum pipe pipe;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev_priv);

	if (de_iir & DE_POISON)
		drm_err(&dev_priv->drm, "Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			intel_handle_vblank(dev_priv, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev_priv))
			cpt_irq_handler(dev_priv, pch_iir);
		else
			ibx_irq_handler(dev_priv, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
		gen5_rps_irq_handler(&dev_priv->gt.rps);
}

1995 1996
/*
 * Ivybridge/Haswell display engine interrupt handler: IVB uses different
 * interrupt bit layouts than ILK and adds error-int and eDP PSR events.
 */
static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
	enum pipe pipe;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev_priv);

	if (de_iir & DE_EDP_PSR_INT_HSW) {
		u32 psr_iir = I915_READ(EDP_PSR_IIR);

		intel_psr_irq_handler(dev_priv, psr_iir);
		I915_WRITE(EDP_PSR_IIR, psr_iir);
	}

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev_priv);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
			intel_handle_vblank(dev_priv, pipe);
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev_priv, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

2036 2037 2038 2039 2040 2041 2042 2043
/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
2044
static irqreturn_t ilk_irq_handler(int irq, void *arg)
2045
{
2046 2047
	struct drm_i915_private *i915 = arg;
	void __iomem * const regs = i915->uncore.regs;
2048
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2049
	irqreturn_t ret = IRQ_NONE;
2050

2051
	if (unlikely(!intel_irqs_enabled(i915)))
2052 2053
		return IRQ_NONE;

2054
	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2055
	disable_rpm_wakeref_asserts(&i915->runtime_pm);
2056

2057
	/* disable master interrupt before clearing iir  */
2058 2059
	de_ier = raw_reg_read(regs, DEIER);
	raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2060

2061 2062 2063 2064 2065
	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
2066 2067 2068
	if (!HAS_PCH_NOP(i915)) {
		sde_ier = raw_reg_read(regs, SDEIER);
		raw_reg_write(regs, SDEIER, 0);
2069
	}
2070

2071 2072
	/* Find, clear, then process each source of interrupt */

2073
	gt_iir = raw_reg_read(regs, GTIIR);
2074
	if (gt_iir) {
2075 2076 2077
		raw_reg_write(regs, GTIIR, gt_iir);
		if (INTEL_GEN(i915) >= 6)
			gen6_gt_irq_handler(&i915->gt, gt_iir);
2078
		else
2079 2080
			gen5_gt_irq_handler(&i915->gt, gt_iir);
		ret = IRQ_HANDLED;
2081 2082
	}

2083
	de_iir = raw_reg_read(regs, DEIIR);
2084
	if (de_iir) {
2085 2086 2087
		raw_reg_write(regs, DEIIR, de_iir);
		if (INTEL_GEN(i915) >= 7)
			ivb_display_irq_handler(i915, de_iir);
2088
		else
2089 2090
			ilk_display_irq_handler(i915, de_iir);
		ret = IRQ_HANDLED;
2091 2092
	}

2093 2094
	if (INTEL_GEN(i915) >= 6) {
		u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
2095
		if (pm_iir) {
2096 2097
			raw_reg_write(regs, GEN6_PMIIR, pm_iir);
			gen6_rps_irq_handler(&i915->gt.rps, pm_iir);
2098 2099
			ret = IRQ_HANDLED;
		}
2100
	}
2101

2102 2103 2104
	raw_reg_write(regs, DEIER, de_ier);
	if (sde_ier)
		raw_reg_write(regs, SDEIER, sde_ier);
2105

2106
	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2107
	enable_rpm_wakeref_asserts(&i915->runtime_pm);
2108

2109 2110 2111
	return ret;
}

2112
static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2113
				u32 hotplug_trigger)
2114
{
2115
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2116

2117 2118
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2119

2120 2121 2122
	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
			   hotplug_trigger, dig_hotplug_reg,
			   dev_priv->hotplug.hpd,
2123
			   bxt_port_hotplug_long_detect);
2124

2125
	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2126 2127
}

2128 2129 2130
/*
 * Gen11+ DE hotplug handler: Type-C and Thunderbolt hotplug events come in
 * through separate trigger masks and control registers.
 */
static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
	u32 pin_mask = 0, long_mask = 0;
	u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
	u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;

	if (trigger_tc) {
		u32 dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);

		I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   trigger_tc, dig_hotplug_reg,
				   dev_priv->hotplug.hpd,
				   gen11_port_hotplug_long_detect);
	}

	if (trigger_tbt) {
		u32 dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);

		I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   trigger_tbt, dig_hotplug_reg,
				   dev_priv->hotplug.hpd,
				   gen11_port_hotplug_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
	else
		drm_err(&dev_priv->drm,
			"Unexpected DE HPD interrupt 0x%08x\n", iir);
}

2165 2166
static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
{
2167
	u32 mask;
2168

2169 2170 2171
	if (INTEL_GEN(dev_priv) >= 12)
		return TGL_DE_PORT_AUX_DDIA |
			TGL_DE_PORT_AUX_DDIB |
2172 2173 2174 2175 2176 2177 2178 2179
			TGL_DE_PORT_AUX_DDIC |
			TGL_DE_PORT_AUX_USBC1 |
			TGL_DE_PORT_AUX_USBC2 |
			TGL_DE_PORT_AUX_USBC3 |
			TGL_DE_PORT_AUX_USBC4 |
			TGL_DE_PORT_AUX_USBC5 |
			TGL_DE_PORT_AUX_USBC6;

2180 2181

	mask = GEN8_AUX_CHANNEL_A;
2182 2183 2184 2185 2186
	if (INTEL_GEN(dev_priv) >= 9)
		mask |= GEN9_AUX_CHANNEL_B |
			GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;

2187
	if (IS_CNL_WITH_PORT_F(dev_priv) || IS_GEN(dev_priv, 11))
2188 2189
		mask |= CNL_AUX_CHANNEL_F;

2190 2191
	if (IS_GEN(dev_priv, 11))
		mask |= ICL_AUX_CHANNEL_E;
2192 2193 2194 2195

	return mask;
}

2196 2197
static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
{
2198 2199 2200
	if (IS_ROCKETLAKE(dev_priv))
		return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
	else if (INTEL_GEN(dev_priv) >= 11)
2201 2202
		return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
	else if (INTEL_GEN(dev_priv) >= 9)
2203 2204 2205 2206 2207
		return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
	else
		return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
}

2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218
/*
 * Handle the DE "misc" interrupt group: opregion ASLE events and eDP PSR.
 * Logs an error if the IIR had no bit we recognise.
 */
static void
gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
	bool found = false;

	if (iir & GEN8_DE_MISC_GSE) {
		intel_opregion_asle_intr(dev_priv);
		found = true;
	}

	if (iir & GEN8_DE_EDP_PSR) {
		u32 psr_iir;
		i915_reg_t iir_reg;

		/* gen12 moved the PSR IIR into a per-transcoder register */
		if (INTEL_GEN(dev_priv) >= 12)
			iir_reg = TRANS_PSR_IIR(dev_priv->psr.transcoder);
		else
			iir_reg = EDP_PSR_IIR;

		psr_iir = I915_READ(iir_reg);
		I915_WRITE(iir_reg, psr_iir);

		if (psr_iir)
			found = true;

		intel_psr_irq_handler(dev_priv, psr_iir);
	}

	if (!found)
		drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
}

2240 2241
/*
 * Gen8+ display engine interrupt dispatch. Each IIR group indicated by
 * master_ctl is read, acked (written back), and routed to the matching
 * sub-handler: misc, gen11+ HPD, DE port (AUX/hotplug/GMBUS), per-pipe
 * events, and finally the chained PCH (SDE) interrupt.
 */
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;
	enum pipe pipe;
	u32 iir;

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = I915_READ(GEN8_DE_MISC_IIR);
		if (iir) {
			I915_WRITE(GEN8_DE_MISC_IIR, iir);
			ret = IRQ_HANDLED;
			gen8_de_misc_irq_handler(dev_priv, iir);
		} else {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE MISC)!\n");
		}
	}

	if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
		iir = I915_READ(GEN11_DE_HPD_IIR);
		if (iir) {
			I915_WRITE(GEN11_DE_HPD_IIR, iir);
			ret = IRQ_HANDLED;
			gen11_hpd_irq_handler(dev_priv, iir);
		} else {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied, (DE HPD)!\n");
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = I915_READ(GEN8_DE_PORT_IIR);
		if (iir) {
			bool found = false;
			u32 tmp_mask;

			I915_WRITE(GEN8_DE_PORT_IIR, iir);
			ret = IRQ_HANDLED;

			if (iir & gen8_de_port_aux_mask(dev_priv)) {
				dp_aux_irq_handler(dev_priv);
				found = true;
			}

			if (IS_GEN9_LP(dev_priv)) {
				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
				if (tmp_mask) {
					bxt_hpd_irq_handler(dev_priv, tmp_mask);
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
				if (tmp_mask) {
					ilk_hpd_irq_handler(dev_priv, tmp_mask);
					found = true;
				}
			}

			if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
				gmbus_irq_handler(dev_priv);
				found = true;
			}

			if (!found)
				drm_err(&dev_priv->drm,
					"Unexpected DE Port interrupt\n");
		} else {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE PORT)!\n");
		}
	}

	for_each_pipe(dev_priv, pipe) {
		u32 fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE PIPE)!\n");
			continue;
		}

		ret = IRQ_HANDLED;
		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK)
			intel_handle_vblank(dev_priv, pipe);

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev_priv, pipe);

		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
		if (fault_errors)
			drm_err(&dev_priv->drm,
				"Fault errors on pipe %c: 0x%08x\n",
				pipe_name(pipe),
				fault_errors);
	}

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		iir = I915_READ(SDEIIR);
		if (iir) {
			I915_WRITE(SDEIIR, iir);
			ret = IRQ_HANDLED;

			if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
				icp_irq_handler(dev_priv, iir);
			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
				spt_irq_handler(dev_priv, iir);
			else
				cpt_irq_handler(dev_priv, iir);
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			drm_dbg(&dev_priv->drm,
				"The master control interrupt lied (SDE)!\n");
		}
	}

	return ret;
}

2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394
/* Disable the gen8 master IRQ and return the latched level indications. */
static inline u32 gen8_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN8_MASTER_IRQ);
}

/* Re-arm the gen8 master IRQ after all sources have been serviced. */
static inline void gen8_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
}

2395 2396
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
2397
	struct drm_i915_private *dev_priv = arg;
2398
	void __iomem * const regs = dev_priv->uncore.regs;
2399 2400 2401 2402 2403
	u32 master_ctl;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

2404 2405 2406
	master_ctl = gen8_master_intr_disable(regs);
	if (!master_ctl) {
		gen8_master_intr_enable(regs);
2407
		return IRQ_NONE;
2408
	}
2409

2410 2411
	/* Find, queue (onto bottom-halves), then clear each source */
	gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
2412 2413 2414

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & ~GEN8_GT_IRQS) {
2415
		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2416
		gen8_de_irq_handler(dev_priv, master_ctl);
2417
		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2418
	}
2419

2420
	gen8_master_intr_enable(regs);
2421

2422
	return IRQ_HANDLED;
2423 2424
}

2425
static u32
2426
gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
2427
{
2428
	void __iomem * const regs = gt->uncore->regs;
2429
	u32 iir;
2430 2431

	if (!(master_ctl & GEN11_GU_MISC_IRQ))
2432 2433 2434 2435 2436
		return 0;

	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
	if (likely(iir))
		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
2437

2438
	return iir;
2439 2440 2441
}

static void
2442
gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
2443 2444
{
	if (iir & GEN11_GU_MISC_GSE)
2445
		intel_opregion_asle_intr(gt->i915);
2446 2447
}

2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465
/* Disable the gen11 master IRQ and return the latched level indications. */
static inline u32 gen11_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
}

/* Re-arm the gen11 master IRQ. */
static inline void gen11_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}

2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484
/* Service the gen11 display interrupt: mask, dispatch to gen8 DE, unmask. */
static void
gen11_display_irq_handler(struct drm_i915_private *i915)
{
	void __iomem * const regs = i915->uncore.regs;
	const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);

	disable_rpm_wakeref_asserts(&i915->runtime_pm);
	/*
	 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
	 * for the display related bits.
	 */
	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
	gen8_de_irq_handler(i915, disp_ctl);
	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
		      GEN11_DISPLAY_IRQ_ENABLE);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);
}

2485 2486 2487 2488
/*
 * Shared gen11/DG1 interrupt core; the platform-specific master
 * disable/enable routines are supplied by the thin wrappers below.
 */
static __always_inline irqreturn_t
__gen11_irq_handler(struct drm_i915_private * const i915,
		    u32 (*intr_disable)(void __iomem * const regs),
		    void (*intr_enable)(void __iomem * const regs))
{
	void __iomem * const regs = i915->uncore.regs;
	struct intel_gt *gt = &i915->gt;
	u32 master_ctl;
	u32 gu_misc_iir;

	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_ctl = intr_disable(regs);
	if (!master_ctl) {
		intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen11_gt_irq_handler(gt, master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & GEN11_DISPLAY_IRQ)
		gen11_display_irq_handler(i915);

	gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);

	intr_enable(regs);

	gen11_gu_misc_irq_handler(gt, gu_misc_iir);

	return IRQ_HANDLED;
}

2520 2521 2522 2523 2524 2525 2526
/* Gen11 top-level IRQ entry point. */
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
	return __gen11_irq_handler(arg,
				   gen11_master_intr_disable,
				   gen11_master_intr_enable);
}

2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566
/*
 * DG1 adds an extra master unit register above GEN11_GFX_MSTR_IRQ.
 * Disable and ack both levels, returning the gfx-level indications.
 */
static u32 dg1_master_intr_disable_and_ack(void __iomem * const regs)
{
	u32 val;

	/* First disable interrupts */
	raw_reg_write(regs, DG1_MSTR_UNIT_INTR, 0);

	/* Get the indication levels and ack the master unit */
	val = raw_reg_read(regs, DG1_MSTR_UNIT_INTR);
	if (unlikely(!val))
		return 0;

	raw_reg_write(regs, DG1_MSTR_UNIT_INTR, val);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt and ack them right away - we keep GEN11_MASTER_IRQ
	 * out as this bit doesn't exist anymore for DG1
	 */
	val = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ) & ~GEN11_MASTER_IRQ;
	if (unlikely(!val))
		return 0;

	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, val);

	return val;
}

/* Re-arm the DG1 master unit interrupt. */
static inline void dg1_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, DG1_MSTR_UNIT_INTR, DG1_MSTR_IRQ);
}

/* DG1 top-level IRQ entry point. */
static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
	return __gen11_irq_handler(arg,
				   dg1_master_intr_disable_and_ack,
				   dg1_master_intr_enable);
}

2567 2568 2569
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
2570
int i8xx_enable_vblank(struct drm_crtc *crtc)
2571
{
2572 2573
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2574
	unsigned long irqflags;
2575

2576
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2577
	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2578
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2579

2580 2581 2582
	return 0;
}

2583
int i915gm_enable_vblank(struct drm_crtc *crtc)
2584
{
2585
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2586

2587 2588 2589 2590 2591 2592 2593 2594
	/*
	 * Vblank interrupts fail to wake the device up from C2+.
	 * Disabling render clock gating during C-states avoids
	 * the problem. There is a small power cost so we do this
	 * only when vblank interrupts are actually enabled.
	 */
	if (dev_priv->vblank_enabled++ == 0)
		I915_WRITE(SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2595

2596
	return i8xx_enable_vblank(crtc);
2597 2598
}

2599
int i965_enable_vblank(struct drm_crtc *crtc)
2600
{
2601 2602
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2603 2604 2605
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2606 2607
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
2608 2609 2610 2611 2612
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

2613
int ilk_enable_vblank(struct drm_crtc *crtc)
J
Jesse Barnes 已提交
2614
{
2615 2616
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
J
Jesse Barnes 已提交
2617
	unsigned long irqflags;
2618
	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
2619
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
J
Jesse Barnes 已提交
2620 2621

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2622
	ilk_enable_display_irq(dev_priv, bit);
J
Jesse Barnes 已提交
2623 2624
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

2625 2626 2627 2628
	/* Even though there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated.
	 */
	if (HAS_PSR(dev_priv))
2629
		drm_crtc_vblank_restore(crtc);
2630

J
Jesse Barnes 已提交
2631 2632 2633
	return 0;
}

2634
int bdw_enable_vblank(struct drm_crtc *crtc)
2635
{
2636 2637
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2638 2639 2640
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2641
	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2642
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2643

2644 2645 2646 2647
	/* Even if there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated, so check only for PSR.
	 */
	if (HAS_PSR(dev_priv))
2648
		drm_crtc_vblank_restore(crtc);
2649

2650 2651 2652
	return 0;
}

2653 2654 2655
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
2656
void i8xx_disable_vblank(struct drm_crtc *crtc)
2657
{
2658 2659
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2660
	unsigned long irqflags;
2661

2662
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2663
	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2664 2665 2666
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

2667
void i915gm_disable_vblank(struct drm_crtc *crtc)
2668
{
2669
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2670

2671
	i8xx_disable_vblank(crtc);
2672

2673 2674
	if (--dev_priv->vblank_enabled == 0)
		I915_WRITE(SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2675 2676
}

2677
void i965_disable_vblank(struct drm_crtc *crtc)
2678
{
2679 2680
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2681 2682 2683
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2684 2685
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2686 2687 2688
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

2689
void ilk_disable_vblank(struct drm_crtc *crtc)
J
Jesse Barnes 已提交
2690
{
2691 2692
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
J
Jesse Barnes 已提交
2693
	unsigned long irqflags;
2694
	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
2695
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
J
Jesse Barnes 已提交
2696 2697

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2698
	ilk_disable_display_irq(dev_priv, bit);
J
Jesse Barnes 已提交
2699 2700 2701
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

2702
void bdw_disable_vblank(struct drm_crtc *crtc)
2703
{
2704 2705
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2706 2707 2708
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2709
	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2710 2711 2712
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

2713
static void ibx_irq_reset(struct drm_i915_private *dev_priv)
P
Paulo Zanoni 已提交
2714
{
2715 2716
	struct intel_uncore *uncore = &dev_priv->uncore;

2717
	if (HAS_PCH_NOP(dev_priv))
P
Paulo Zanoni 已提交
2718 2719
		return;

2720
	GEN3_IRQ_RESET(uncore, SDE);
2721

2722
	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
2723
		I915_WRITE(SERR_INT, 0xffffffff);
P
Paulo Zanoni 已提交
2724
}
2725

P
Paulo Zanoni 已提交
2726 2727 2728 2729 2730 2731 2732 2733
/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * This function needs to be called before interrupts are enabled.
 */
2734
static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv)
P
Paulo Zanoni 已提交
2735
{
2736
	if (HAS_PCH_NOP(dev_priv))
P
Paulo Zanoni 已提交
2737 2738
		return;

2739
	drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0);
P
Paulo Zanoni 已提交
2740 2741 2742 2743
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}

2744 2745
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
2746 2747
	struct intel_uncore *uncore = &dev_priv->uncore;

2748
	if (IS_CHERRYVIEW(dev_priv))
2749
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
2750
	else
2751
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);
2752

2753
	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
2754
	intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2755

2756
	i9xx_pipestat_irq_reset(dev_priv);
2757

2758
	GEN3_IRQ_RESET(uncore, VLV_);
2759
	dev_priv->irq_mask = ~0u;
2760 2761
}

2762 2763
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
2764 2765
	struct intel_uncore *uncore = &dev_priv->uncore;

2766
	u32 pipestat_mask;
2767
	u32 enable_mask;
2768 2769
	enum pipe pipe;

2770
	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
2771 2772 2773 2774 2775

	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

2776 2777
	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2778 2779 2780 2781
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_LPE_PIPE_A_INTERRUPT |
		I915_LPE_PIPE_B_INTERRUPT;

2782
	if (IS_CHERRYVIEW(dev_priv))
2783 2784
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
			I915_LPE_PIPE_C_INTERRUPT;
2785

2786
	drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
2787

2788 2789
	dev_priv->irq_mask = ~enable_mask;

2790
	GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
2791 2792 2793 2794
}

/* drm_dma.h hooks
*/
2795
static void ilk_irq_reset(struct drm_i915_private *dev_priv)
2796
{
2797
	struct intel_uncore *uncore = &dev_priv->uncore;
2798

2799
	GEN3_IRQ_RESET(uncore, DE);
2800
	if (IS_GEN(dev_priv, 7))
2801
		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
2802

2803
	if (IS_HASWELL(dev_priv)) {
2804 2805
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
2806 2807
	}

2808
	gen5_gt_irq_reset(&dev_priv->gt);
2809

2810
	ibx_irq_reset(dev_priv);
2811 2812
}

2813
static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
J
Jesse Barnes 已提交
2814
{
2815 2816 2817
	I915_WRITE(VLV_MASTER_IER, 0);
	POSTING_READ(VLV_MASTER_IER);

2818
	gen5_gt_irq_reset(&dev_priv->gt);
J
Jesse Barnes 已提交
2819

2820
	spin_lock_irq(&dev_priv->irq_lock);
2821 2822
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
2823
	spin_unlock_irq(&dev_priv->irq_lock);
J
Jesse Barnes 已提交
2824 2825
}

2826
static void gen8_irq_reset(struct drm_i915_private *dev_priv)
2827
{
2828
	struct intel_uncore *uncore = &dev_priv->uncore;
2829
	enum pipe pipe;
2830

2831
	gen8_master_intr_disable(dev_priv->uncore.regs);
2832

2833
	gen8_gt_irq_reset(&dev_priv->gt);
2834

2835 2836
	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
2837

2838
	for_each_pipe(dev_priv, pipe)
2839 2840
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
2841
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
2842

2843 2844 2845
	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
2846

2847
	if (HAS_PCH_SPLIT(dev_priv))
2848
		ibx_irq_reset(dev_priv);
2849
}
2850

2851
static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
M
Mika Kuoppala 已提交
2852
{
2853
	struct intel_uncore *uncore = &dev_priv->uncore;
2854
	enum pipe pipe;
2855 2856
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
M
Mika Kuoppala 已提交
2857

2858
	intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
M
Mika Kuoppala 已提交
2859

2860 2861 2862
	if (INTEL_GEN(dev_priv) >= 12) {
		enum transcoder trans;

2863
		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
			intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
		}
	} else {
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}
2877

M
Mika Kuoppala 已提交
2878 2879 2880
	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
2881
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
M
Mika Kuoppala 已提交
2882

2883 2884 2885
	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
	GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
2886

2887
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2888
		GEN3_IRQ_RESET(uncore, SDE);
M
Matt Roper 已提交
2889

2890 2891
	/* Wa_14010685332:icl,jsl,ehl,tgl,rkl */
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) {
M
Matt Roper 已提交
2892 2893 2894 2895 2896
		intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
				 SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
		intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
				 SBCLK_RUN_REFCLK_DIS, 0);
	}
M
Mika Kuoppala 已提交
2897 2898
}

2899 2900 2901 2902
/*
 * Full gen11+ IRQ reset: disable/ack the master interrupt (DG1 uses a
 * separate master unit register), then reset GT, display, GU_MISC and PCU.
 */
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (HAS_MASTER_UNIT_IRQ(dev_priv))
		dg1_master_intr_disable_and_ack(dev_priv->uncore.regs);
	else
		gen11_master_intr_disable(dev_priv->uncore.regs);

	gen11_gt_irq_reset(&dev_priv->gt);
	gen11_display_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
}

2915
/*
 * Re-arm the per-pipe display interrupts for the pipes in @pipe_mask after
 * their power well was enabled (the registers lose state while powered off).
 * Always enables vblank and FIFO underrun on top of the saved pipe mask.
 */
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	/* Nothing to restore if driver interrupts are globally disabled. */
	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
				  dev_priv->de_irq_mask[pipe],
				  ~dev_priv->de_irq_mask[pipe] | extra_ier);

	spin_unlock_irq(&dev_priv->irq_lock);
}

2938
/*
 * Tear down per-pipe display interrupts for the pipes in @pipe_mask before
 * their power well is disabled, then synchronize so no handler is still
 * processing display interrupts when the well actually goes down.
 */
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);
}

2960
/*
 * CHV IRQ reset: kill the gen8-style master interrupt (with a posting read
 * to flush the write), reset GT and PCU banks, then reset the VLV-style
 * display interrupts under irq_lock if they are currently enabled.
 */
static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(&dev_priv->gt);

	GEN3_IRQ_RESET(uncore, GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

2977
static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
2978 2979 2980 2981 2982
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;

2983
	for_each_intel_encoder(&dev_priv->drm, encoder)
2984 2985 2986 2987 2988 2989
		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}

2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001
static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 hotplug_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		hotplug_irqs |= hpd[encoder->hpd_pin];

	return hotplug_irqs;
}

3002
/*
 * Program PCH digital hotplug detection for ports B-D (and port A on
 * LPT-LP, where CPU and PCH share a package).
 */
static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on LPT+.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTB_PULSE_DURATION_MASK |
		     PORTC_PULSE_DURATION_MASK |
		     PORTD_PULSE_DURATION_MASK);
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	/*
	 * When CPU and PCH are on the same package, port A
	 * HPD must be enabled in both north and south.
	 */
	if (HAS_PCH_LPT_LP(dev_priv))
		hotplug |= PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
X
Xiong Zhang 已提交
3026

3027 3028 3029 3030
/*
 * Unmask the PCH (south) hotplug interrupts for the encoders that have HPD
 * enabled, then program the detection logic.
 */
static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	ibx_hpd_detection_setup(dev_priv);
}

3039 3040
/* OR @enable_mask into the ICP+ south DDI hotplug control register. */
static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv,
					u32 enable_mask)
{
	u32 hotplug;

	hotplug = I915_READ(SHOTPLUG_CTL_DDI);
	hotplug |= enable_mask;
	I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
}
3048

3049 3050 3051 3052 3053 3054 3055 3056
/* OR @enable_mask into the ICP+ south Type-C hotplug control register. */
static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv,
				       u32 enable_mask)
{
	u32 hotplug;

	hotplug = I915_READ(SHOTPLUG_CTL_TC);
	hotplug |= enable_mask;
	I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
}

3059
/*
 * Set up ICP-family south hotplug interrupts: unmask in SDEIMR and enable
 * detection on the DDI (and optionally Type-C) port groups given by the
 * caller's enable masks.
 */
static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv,
			      u32 ddi_enable_mask, u32 tc_enable_mask)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);

	/* Program the HPD filter duration on PCHs up to and including TGP. */
	if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
		I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	icp_ddi_hpd_detection_setup(dev_priv, ddi_enable_mask);
	if (tc_enable_mask)
		icp_tc_hpd_detection_setup(dev_priv, tc_enable_mask);
}

3077 3078 3079 3080
/*
 * EHL doesn't need most of gen11_hpd_irq_setup, it's handling only the
 * equivalent of SDE.
 */
static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	/* MCC exposes ICP-style DDI HPD plus a single Type-C port (TC1). */
	icp_hpd_irq_setup(dev_priv,
			  ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE(PORT_TC1));
}

M
Matt Roper 已提交
3087 3088 3089 3090 3091 3092 3093 3094
/*
 * JSP behaves exactly the same as MCC above except that port C is mapped to
 * the DDI-C pins instead of the TC1 pins.  This means we should follow TGP's
 * masks & tables rather than ICP's masks & tables.
 */
static void jsp_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	/* No Type-C HPD on JSP: tc_enable_mask is 0. */
	icp_hpd_irq_setup(dev_priv,
			  TGP_DDI_HPD_ENABLE_MASK, 0);
}

3098 3099 3100 3101 3102 3103 3104 3105
/*
 * Enable north (CPU-side) hotplug detection for all six Type-C ports, on
 * both the TC (DP-alt/legacy) and TBT (Thunderbolt) hotplug control
 * registers.
 */
static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC5) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC6);
	I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);

	hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC5) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC6);
	I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
}

/*
 * Set up gen11+ hotplug interrupts: unmask the enabled north HPD bits in
 * GEN11_DE_HPD_IMR, enable detection, then hand off south-side setup to
 * the appropriate icp_hpd_irq_setup() variant for the PCH generation.
 */
static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;
	u32 val;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);

	/* Unmask enabled HPD bits, keep the not-yet-enabled ones masked. */
	val = I915_READ(GEN11_DE_HPD_IMR);
	val &= ~hotplug_irqs;
	val |= ~enabled_irqs & hotplug_irqs;
	I915_WRITE(GEN11_DE_HPD_IMR, val);
	POSTING_READ(GEN11_DE_HPD_IMR);

	gen11_hpd_detection_setup(dev_priv);

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
		icp_hpd_irq_setup(dev_priv,
				  TGP_DDI_HPD_ENABLE_MASK, TGP_TC_HPD_ENABLE_MASK);
	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_hpd_irq_setup(dev_priv,
				  ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE_MASK);
}

3145
/*
 * Enable SPT/CNP PCH digital hotplug detection on ports A-E, applying the
 * CNP chassis-clock-request workaround first.
 */
static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 val, hotplug;

	/* Display WA #1179 WaHardHangonHotPlug: cnp */
	if (HAS_PCH_CNP(dev_priv)) {
		val = I915_READ(SOUTH_CHICKEN1);
		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
		val |= CHASSIS_CLK_REQ_DURATION(0xf);
		I915_WRITE(SOUTH_CHICKEN1, val);
	}

	/* Enable digital hotplug on the PCH */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE |
		   PORTD_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);

	/* Port E lives in the second hotplug control register. */
	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
	hotplug |= PORTE_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}

3170 3171 3172 3173
/*
 * Set up SPT+ south hotplug interrupts: program the HPD filter on CNP+,
 * unmask the enabled bits, then enable detection.
 */
static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	spt_hpd_detection_setup(dev_priv);
}

3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200
/* Enable CPU-side (north) digital port A hotplug detection. */
static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 * The pulse duration bits are reserved on HSW+.
	 */
	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
		   DIGITAL_PORTA_PULSE_DURATION_2ms;
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
}

3201
/*
 * Set up ILK..BDW hotplug interrupts: unmask the CPU-side HPD bits in the
 * display IMR (gen8+ uses the BDW port IRQ path), enable north detection,
 * and chain to the PCH setup.
 */
static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);

	if (INTEL_GEN(dev_priv) >= 8)
		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
	else
		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);

	ilk_hpd_detection_setup(dev_priv);

	ibx_hpd_irq_setup(dev_priv);
}

3218 3219
/*
 * Enable BXT hotplug detection on DDI A-C and program per-port HPD signal
 * inversion according to the VBT (board AOB design dependent).
 */
static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
				      u32 enabled_irqs)
{
	u32 hotplug;

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE;

	drm_dbg_kms(&dev_priv->drm,
		    "Invert bit setting: hp_ctl:%x hp_port:%x\n",
		    hotplug, enabled_irqs);
	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;

	/*
	 * For BXT invert bit has to be set based on AOB design
	 * for HPD detection logic, update it based on VBT fields.
	 */
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
		hotplug |= BXT_DDIA_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
		hotplug |= BXT_DDIB_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
		hotplug |= BXT_DDIC_HPD_INVERT;

	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

3250 3251 3252 3253 3254 3255 3256 3257 3258
/* Detection-only variant: consider all BXT port HPD bits enabled. */
static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	__bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
}

/*
 * Set up BXT hotplug interrupts: unmask enabled bits via the BDW port IRQ
 * path, then program detection/inversion for those bits only.
 */
static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);

	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);

	__bxt_hpd_detection_setup(dev_priv, enabled_irqs);
}

3267
/*
 * Unmask the always-wanted south display engine interrupts (GMBUS, AUX,
 * poison) for the PCH generation at hand, then enable hotplug detection.
 * No-op on PCH_NOP systems.
 */
static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	u32 mask;

	if (HAS_PCH_NOP(dev_priv))
		return;

	if (HAS_PCH_IBX(dev_priv))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
	else
		mask = SDE_GMBUS_CPT;

	/* IIR must be clear before unmasking, or we lose the edge. */
	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);

	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
	    HAS_PCH_LPT(dev_priv))
		ibx_hpd_detection_setup(dev_priv);
	else
		spt_hpd_detection_setup(dev_priv);
}

3291
/*
 * Post-install for ILK..IVB/HSW: choose the gen-specific display IMR/IER
 * masks, arm the DE bank, GT interrupts, north HPD detection and the PCH,
 * and on Ironlake-M additionally enable PCU events.
 */
static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 display_mask, extra_mask;

	if (INTEL_GEN(dev_priv) >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_DP_A_HOTPLUG);
	}

	if (IS_HASWELL(dev_priv)) {
		/* PSR IIR must already be clear before unmasking PSR. */
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
		display_mask |= DE_EDP_PSR_INT_HSW;
	}

	dev_priv->irq_mask = ~display_mask;

	ibx_irq_pre_postinstall(dev_priv);

	/* extra_mask bits are enabled in IER but stay masked in IMR. */
	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
		      display_mask | extra_mask);

	gen5_gt_irq_postinstall(&dev_priv->gt);

	ilk_hpd_detection_setup(dev_priv);

	ibx_irq_postinstall(dev_priv);

	if (IS_IRONLAKE_M(dev_priv)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irq(&dev_priv->irq_lock);
		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}
}

3341 3342
/*
 * Mark VLV/CHV display interrupts as enabled and, if driver interrupts are
 * live, reset and re-arm the display IRQ registers. Caller must hold
 * irq_lock.
 */
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv)) {
		vlv_display_irq_reset(dev_priv);
		vlv_display_irq_postinstall(dev_priv);
	}
}

/*
 * Mark VLV/CHV display interrupts as disabled and, if driver interrupts are
 * live, reset the display IRQ registers. Caller must hold irq_lock.
 */
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		vlv_display_irq_reset(dev_priv);
}

3369

3370
/*
 * VLV post-install: arm GT interrupts, arm display interrupts (if enabled)
 * under irq_lock, then enable the master interrupt with a posting read.
 */
static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen5_gt_irq_postinstall(&dev_priv->gt);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(VLV_MASTER_IER);
}

3383 3384
/*
 * Arm the gen8+ display engine interrupt banks: per-pipe (only pipes whose
 * power domain is on), port, misc and — on gen11+ — the dedicated HPD bank,
 * with platform-specific extra bits for BXT/BDW/gen11+.
 */
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
		GEN8_PIPE_CDCLK_CRC_DONE;
	u32 de_pipe_enables;
	u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
	enum pipe pipe;

	if (INTEL_GEN(dev_priv) <= 10)
		de_misc_masked |= GEN8_DE_MISC_GSE;

	if (IS_GEN9_LP(dev_priv))
		de_port_masked |= BXT_DE_PORT_GMBUS;

	/* vblank/underrun are enabled in IER but left masked in IMR. */
	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

	de_port_enables = de_port_masked;
	if (IS_GEN9_LP(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;

	if (INTEL_GEN(dev_priv) >= 12) {
		enum transcoder trans;

		/* Gen12+ PSR IIR is per-transcoder; check only powered ones. */
		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
		}
	} else {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
	}

	for_each_pipe(dev_priv, pipe) {
		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;

		/* Pipes in powered-down wells get armed on power-well enable. */
		if (intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);
	}

	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);

	if (INTEL_GEN(dev_priv) >= 11) {
		u32 de_hpd_masked = 0;
		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
				     GEN11_DE_TBT_HOTPLUG_MASK;

		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
			      de_hpd_enables);
		gen11_hpd_detection_setup(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		bxt_hpd_detection_setup(dev_priv);
	} else if (IS_BROADWELL(dev_priv)) {
		ilk_hpd_detection_setup(dev_priv);
	}
}

3456
/*
 * Gen8 post-install: PCH pre/post hooks around GT + display engine arming,
 * then finally enable the master interrupt.
 */
static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_pre_postinstall(dev_priv);

	gen8_gt_irq_postinstall(&dev_priv->gt);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev_priv);

	gen8_master_intr_enable(dev_priv->uncore.regs);
}

3470
/*
 * ICP-family south post-install: enable SDEIER, unmask GMBUS in SDEIMR and
 * enable the DDI/Type-C hotplug detection masks matching the PCH variant.
 */
static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
	u32 mask = SDE_GMBUS_ICP;

	drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);

	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);

	if (HAS_PCH_TGP(dev_priv)) {
		icp_ddi_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK);
		icp_tc_hpd_detection_setup(dev_priv, TGP_TC_HPD_ENABLE_MASK);
	} else if (HAS_PCH_JSP(dev_priv)) {
		/* JSP: TGP-style DDI pins, no Type-C. */
		icp_ddi_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK);
	} else if (HAS_PCH_MCC(dev_priv)) {
		/* MCC (EHL): ICP-style DDI plus a single TC1 port. */
		icp_ddi_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK);
		icp_tc_hpd_detection_setup(dev_priv, ICP_TC_HPD_ENABLE(PORT_TC1));
	} else {
		icp_ddi_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK);
		icp_tc_hpd_detection_setup(dev_priv, ICP_TC_HPD_ENABLE_MASK);
	}
}

3495
/*
 * Gen11+ post-install: south (PCH), GT, display engine and GU_MISC banks,
 * then display interrupt enable and finally the master interrupt (DG1 uses
 * the separate master unit register), each flushed with a posting read.
 */
static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);

	gen11_gt_irq_postinstall(&dev_priv->gt);
	gen8_de_irq_postinstall(dev_priv);

	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);

	if (HAS_MASTER_UNIT_IRQ(dev_priv)) {
		dg1_master_intr_enable(uncore->regs);
		POSTING_READ(DG1_MSTR_UNIT_INTR);
	} else {
		gen11_master_intr_enable(uncore->regs);
		POSTING_READ(GEN11_GFX_MSTR_IRQ);
	}
}

3519
/*
 * CHV post-install: arm GT interrupts, arm VLV-style display interrupts (if
 * enabled) under irq_lock, then enable the gen8 master interrupt.
 */
static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen8_gt_irq_postinstall(&dev_priv->gt);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);
}

3532
/* Gen2 IRQ reset: clear pipestat state and the GEN2 IMR/IER/IIR bank. */
static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i9xx_pipestat_irq_reset(dev_priv);

	GEN2_IRQ_RESET(uncore);
}

3541
/*
 * Gen2 post-install: set the error mask, unmask the always-on interrupts
 * (pipe events, master error, user), then enable per-pipe CRC pipestats.
 */
static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u16 enable_mask;

	intel_uncore_write16(uncore,
			     EMR,
			     ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);
}

3573
/*
 * Acknowledge gen2 error interrupts: latch EIR into *eir, clear it, and
 * record any bits that refuse to clear in *eir_stuck (those get masked).
 */
static void i8xx_error_irq_ack(struct drm_i915_private *i915,
			       u16 *eir, u16 *eir_stuck)
{
	struct intel_uncore *uncore = &i915->uncore;
	u16 emr;

	*eir = intel_uncore_read16(uncore, EIR);

	if (*eir)
		intel_uncore_write16(uncore, EIR, *eir);

	/* Re-read: anything still set could not be cleared by the write. */
	*eir_stuck = intel_uncore_read16(uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_read16(uncore, EMR);
	intel_uncore_write16(uncore, EMR, 0xffff);
	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
}

/* Log the acked gen2 error bits; note which stuck bits were masked off. */
static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u16 eir, u16 eir_stuck)
{
	DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
			eir_stuck);
}

/*
 * 32-bit variant of i8xx_error_irq_ack() for gen3+: latch and clear EIR,
 * record unclearable bits in *eir_stuck and mask them in EMR.
 */
static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	*eir = I915_READ(EIR);

	I915_WRITE(EIR, *eir);

	/* Re-read: anything still set could not be cleared by the write. */
	*eir_stuck = I915_READ(EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = I915_READ(EMR);
	I915_WRITE(EMR, 0xffffffff);
	I915_WRITE(EMR, emr | *eir_stuck);
}

/* Log the acked gen3+ error bits; note which stuck bits were masked off. */
static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u32 eir, u32 eir_stuck)
{
	DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
			eir_stuck);
}

3651
/*
 * Gen2 interrupt handler: ack pipestat and error state before clearing IIR,
 * then dispatch user interrupts (breadcrumbs), errors and pipe events.
 * Runs under a runtime-PM wakeref-assert exemption since IRQs are synced
 * during runtime suspend.
 */
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u16 eir = 0, eir_stuck = 0;
		u16 iir;

		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);

		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

3696
/*
 * Gen3 IRQ reset: clear hotplug enables/status (if the platform has
 * hotplug), pipestat state and the GEN2-layout IMR/IER/IIR bank.
 */
static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		/* Write back the status bits to clear them (write-1-to-clear). */
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
}

3710
/*
 * Gen3 post-install: set the error mask, unmask the always-on interrupts
 * (ASLE, pipe events, master error, user, plus hotplug where available),
 * enable CRC pipestats and the ASLE pipestat.
 */
static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
			  I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}

3751
/*
 * Gen3 interrupt handler: ack hotplug, pipestat and error state before
 * clearing IIR, then dispatch user interrupts (breadcrumbs), errors,
 * hotplug and pipe events. Runs under a runtime-PM wakeref-assert
 * exemption since IRQs are synced during runtime suspend.
 */
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		I915_WRITE(GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

3804
/*
 * Gen4 IRQ reset: clear hotplug enables/status, pipestat state and the
 * GEN2-layout IMR/IER/IIR bank.
 */
static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	/* Write back the status bits to clear them (write-1-to-clear). */
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
}

3816
/*
 * Gen4/G4X post-install: program EMR (G4X has extra error sources), unmask
 * the always-on interrupts (including display port and, on G4X, the BSD
 * ring), then enable GMBUS/CRC pipestats and the ASLE pipestat.
 */
static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;
	u32 error_mask;

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}

3869
static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
3870 3871 3872
{
	u32 hotplug_en;

3873
	lockdep_assert_held(&dev_priv->irq_lock);
3874

3875 3876
	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
3877
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
3878 3879 3880 3881
	/* Programming the CRT detection parameters tends
	   to generate a spurious hotplug event about three
	   seconds later.  So just do it once.
	*/
3882
	if (IS_G4X(dev_priv))
3883 3884 3885 3886
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
3887
	i915_hotplug_interrupt_update_locked(dev_priv,
3888 3889 3890 3891
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
3892 3893
}

3894
static irqreturn_t i965_irq_handler(int irq, void *arg)
3895
{
3896
	struct drm_i915_private *dev_priv = arg;
3897
	irqreturn_t ret = IRQ_NONE;
3898

3899 3900 3901
	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

3902
	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
3903
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3904

3905
	do {
3906
		u32 pipe_stats[I915_MAX_PIPES] = {};
3907
		u32 eir = 0, eir_stuck = 0;
3908 3909
		u32 hotplug_status = 0;
		u32 iir;
3910

3911
		iir = I915_READ(GEN2_IIR);
3912
		if (iir == 0)
3913 3914 3915 3916
			break;

		ret = IRQ_HANDLED;

3917 3918 3919 3920 3921 3922
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
3923

3924 3925 3926
		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

3927
		I915_WRITE(GEN2_IIR, iir);
3928 3929

		if (iir & I915_USER_INTERRUPT)
3930
			intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
3931

3932
		if (iir & I915_BSD_USER_INTERRUPT)
3933
			intel_engine_signal_breadcrumbs(dev_priv->gt.engine[VCS0]);
3934

3935 3936
		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
3937

3938 3939 3940 3941 3942
		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);
3943

3944
	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3945

3946 3947 3948
	return ret;
}

3949 3950 3951 3952 3953 3954 3955
/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not setup the interrupt itself though.
 */
3956
void intel_irq_init(struct drm_i915_private *dev_priv)
3957
{
3958
	struct drm_device *dev = &dev_priv->drm;
3959
	int i;
3960

3961 3962
	intel_hpd_init_pins(dev_priv);

3963 3964
	intel_hpd_init_work(dev_priv);

3965
	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
3966 3967
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;
3968

3969
	/* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
3970
	if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11)
3971
		dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;
3972

3973
	dev->vblank_disable_immediate = true;
3974

3975 3976 3977 3978 3979 3980 3981 3982 3983 3984
	/* Most platforms treat the display irq block as an always-on
	 * power domain. vlv/chv can disable it at runtime and need
	 * special care to avoid writing any of the display block registers
	 * outside of the power domain. We defer setting up the display irqs
	 * in this case to the runtime pm.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

L
Lyude 已提交
3985
	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
3986 3987 3988 3989 3990 3991 3992
	/* If we have MST support, we want to avoid doing short HPD IRQ storm
	 * detection, as short HPD storms will occur as a natural part of
	 * sideband messaging with MST.
	 * On older platforms however, IRQ storms can occur with both long and
	 * short pulses, as seen on some G4x systems.
	 */
	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
L
Lyude 已提交
3993

3994 3995 3996 3997
	if (HAS_GMCH(dev_priv)) {
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else {
M
Matt Roper 已提交
3998 3999 4000
		if (HAS_PCH_JSP(dev_priv))
			dev_priv->display.hpd_irq_setup = jsp_hpd_irq_setup;
		else if (HAS_PCH_MCC(dev_priv))
4001 4002
			dev_priv->display.hpd_irq_setup = mcc_hpd_irq_setup;
		else if (INTEL_GEN(dev_priv) >= 11)
4003 4004
			dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
		else if (IS_GEN9_LP(dev_priv))
4005
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4006
		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
4007 4008
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
4009
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4010 4011
	}
}
4012

4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 4024 4025 4026
/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}

4027 4028 4029 4030 4031 4032 4033 4034 4035 4036 4037 4038 4039 4040
static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			return cherryview_irq_handler;
		else if (IS_VALLEYVIEW(dev_priv))
			return valleyview_irq_handler;
		else if (IS_GEN(dev_priv, 4))
			return i965_irq_handler;
		else if (IS_GEN(dev_priv, 3))
			return i915_irq_handler;
		else
			return i8xx_irq_handler;
	} else {
4041 4042
		if (HAS_MASTER_UNIT_IRQ(dev_priv))
			return dg1_irq_handler;
4043 4044 4045 4046 4047
		if (INTEL_GEN(dev_priv) >= 11)
			return gen11_irq_handler;
		else if (INTEL_GEN(dev_priv) >= 8)
			return gen8_irq_handler;
		else
4048
			return ilk_irq_handler;
4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063 4064 4065 4066 4067 4068 4069 4070
	}
}

/* Dispatch to the platform-specific interrupt reset routine. */
static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_reset(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_reset(dev_priv);
		else if (IS_GEN(dev_priv, 4))
			i965_irq_reset(dev_priv);
		else if (IS_GEN(dev_priv, 3))
			i915_irq_reset(dev_priv);
		else
			i8xx_irq_reset(dev_priv);
	} else {
		if (INTEL_GEN(dev_priv) >= 11)
			gen11_irq_reset(dev_priv);
		else if (INTEL_GEN(dev_priv) >= 8)
			gen8_irq_reset(dev_priv);
		else
			ilk_irq_reset(dev_priv);
	}
}

/* Dispatch to the platform-specific interrupt postinstall routine. */
static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_postinstall(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_postinstall(dev_priv);
		else if (IS_GEN(dev_priv, 4))
			i965_irq_postinstall(dev_priv);
		else if (IS_GEN(dev_priv, 3))
			i915_irq_postinstall(dev_priv);
		else
			i8xx_irq_postinstall(dev_priv);
	} else {
		if (INTEL_GEN(dev_priv) >= 11)
			gen11_irq_postinstall(dev_priv);
		else if (INTEL_GEN(dev_priv) >= 8)
			gen8_irq_postinstall(dev_priv);
		else
			ilk_irq_postinstall(dev_priv);
	}
}

4098 4099 4100 4101 4102 4103 4104 4105 4106 4107 4108
/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
4109 4110
int intel_irq_install(struct drm_i915_private *dev_priv)
{
4111 4112 4113
	int irq = dev_priv->drm.pdev->irq;
	int ret;

4114 4115 4116 4117 4118
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
4119
	dev_priv->runtime_pm.irqs_enabled = true;
4120

4121 4122 4123 4124 4125 4126 4127 4128 4129 4130 4131 4132 4133 4134
	dev_priv->drm.irq_enabled = true;

	intel_irq_reset(dev_priv);

	ret = request_irq(irq, intel_irq_handler(dev_priv),
			  IRQF_SHARED, DRIVER_NAME, dev_priv);
	if (ret < 0) {
		dev_priv->drm.irq_enabled = false;
		return ret;
	}

	intel_irq_postinstall(dev_priv);

	return ret;
4135 4136
}

4137 4138 4139 4140 4141 4142 4143
/**
 * intel_irq_uninstall - finilizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
4144 4145
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
4146 4147 4148
	int irq = dev_priv->drm.pdev->irq;

	/*
4149 4150 4151 4152
	 * FIXME we can get called twice during driver probe
	 * error handling as well as during driver remove due to
	 * intel_modeset_driver_remove() calling us out of sequence.
	 * Would be nice if it didn't do that...
4153 4154 4155 4156 4157 4158 4159 4160 4161 4162
	 */
	if (!dev_priv->drm.irq_enabled)
		return;

	dev_priv->drm.irq_enabled = false;

	intel_irq_reset(dev_priv);

	free_irq(irq, dev_priv);

4163
	intel_hpd_cancel_work(dev_priv);
4164
	dev_priv->runtime_pm.irqs_enabled = false;
4165 4166
}

4167 4168 4169 4170 4171 4172 4173
/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
4174
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4175
{
4176
	intel_irq_reset(dev_priv);
4177
	dev_priv->runtime_pm.irqs_enabled = false;
4178
	intel_synchronize_irq(dev_priv);
4179 4180
}

4181 4182 4183 4184 4185 4186 4187
/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
4188
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4189
{
4190
	dev_priv->runtime_pm.irqs_enabled = true;
4191 4192
	intel_irq_reset(dev_priv);
	intel_irq_postinstall(dev_priv);
4193
}
4194 4195 4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206 4207

/*
 * Report whether driver interrupt handling is currently enabled.
 * drm_irq_uninstall() is only used at unload and VT switch, so the
 * runtime-pm bookkeeping flag is the single thing to consult here.
 */
bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	bool enabled = dev_priv->runtime_pm.irqs_enabled;

	return enabled;
}

/* Wait until any currently executing i915 interrupt handler has completed. */
void intel_synchronize_irq(struct drm_i915_private *i915)
{
	int irq = i915->drm.pdev->irq;

	synchronize_irq(irq);
}