/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>
#include <drm/drm_irq.h>
#include <drm/i915_drm.h>

#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"

#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_pm.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling
 * interrupt handling. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
	[HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
	[HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
	[HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
};

static const u32 hpd_gen12[HPD_NUM_PINS] = {
	[HPD_PORT_D] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
	[HPD_PORT_E] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
	[HPD_PORT_F] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
	[HPD_PORT_G] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG,
	[HPD_PORT_H] = GEN12_TC5_HOTPLUG | GEN12_TBT5_HOTPLUG,
	[HPD_PORT_I] = GEN12_TC6_HOTPLUG | GEN12_TBT6_HOTPLUG
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
	[HPD_PORT_C] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
	[HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
	[HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
	[HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
};

static const u32 hpd_tgp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(PORT_C),
	[HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
	[HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
	[HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
	[HPD_PORT_G] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
	[HPD_PORT_H] = SDE_TC_HOTPLUG_ICP(PORT_TC5),
	[HPD_PORT_I] = SDE_TC_HOTPLUG_ICP(PORT_TC6),
};

void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
		    i915_reg_t iir, i915_reg_t ier)
{
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	intel_uncore_write(uncore, ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}

void gen2_irq_reset(struct intel_uncore *uncore)
{
	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IMR);

	intel_uncore_write16(uncore, GEN2_IER, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}

static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(GEN2_IIR), val);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

void gen3_irq_init(struct intel_uncore *uncore,
		   i915_reg_t imr, u32 imr_val,
		   i915_reg_t ier, u32 ier_val,
		   i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}

void gen2_irq_init(struct intel_uncore *uncore,
		   u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}
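
/*
 * Rough usage sketch for the init/reset helpers above (hand-written for
 * illustration; the DE* registers stand in for any gen3-style bank and
 * enable_mask is invented):
 *
 *	gen3_irq_reset(uncore, DEIMR, DEIIR, DEIER);
 *	...
 *	gen3_irq_init(uncore, DEIMR, ~enable_mask,
 *		      DEIER, enable_mask, DEIIR);
 *
 * A set bit in IMR *masks* an interrupt while a set bit in IER enables
 * it, hence the inverted mask; IIR is asserted to be zero before the
 * sources are switched back on.
 */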

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     u32 mask,
				     u32 bits)
{
	u32 val;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid that read-modify-write cycles
 * interfere, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    u32 interrupt_mask,
			    u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
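
/*
 * Worked example of the update-mask arithmetic above (values invented):
 * with interrupt_mask = 0b1100 and enabled_irq_mask = 0b0100,
 *
 *	new_val &= ~0b1100;            - forget the bits being updated
 *	new_val |= ~0b0100 & 0b1100;   - sets only bit 3
 *
 * so bit 2 ends up cleared in DEIMR (interrupt enabled) and bit 3 set
 * (interrupt masked), while all untouched IMR bits keep their state.
 */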

static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);

	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt = &dev_priv->gt;

	spin_lock_irq(&gt->irq_lock);

	while (gen11_gt_reset_one_iir(gt, 0, GEN11_GTPM))
		;

	dev_priv->gt_pm.rps.pm_iir = 0;

	spin_unlock_irq(&gt->irq_lock);
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt = &dev_priv->gt;

	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_reset_iir(gt, GEN6_PM_RPS_EVENTS);
	dev_priv->gt_pm.rps.pm_iir = 0;
	spin_unlock_irq(&gt->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt = &dev_priv->gt;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (READ_ONCE(rps->interrupts_enabled))
		return;

	spin_lock_irq(&gt->irq_lock);
	WARN_ON_ONCE(rps->pm_iir);

	if (INTEL_GEN(dev_priv) >= 11)
		WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GTPM));
	else
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);

	rps->interrupts_enabled = true;
	gen6_gt_pm_enable_irq(gt, dev_priv->pm_rps_events);

	spin_unlock_irq(&gt->irq_lock);
}

u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915, u32 mask)
{
	return mask & ~i915->gt_pm.rps.pm_intrmsk_mbz;
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	struct intel_gt *gt = &dev_priv->gt;

	if (!READ_ONCE(rps->interrupts_enabled))
		return;

	spin_lock_irq(&gt->irq_lock);
	rps->interrupts_enabled = false;

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));

	gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);

	spin_unlock_irq(&gt->irq_lock);
	intel_synchronize_irq(dev_priv);

	/* Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&rps->work);
	if (INTEL_GEN(dev_priv) >= 11)
		gen11_reset_rps_interrupts(dev_priv);
	else
		gen6_reset_rps_interrupts(dev_priv);
}
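
/*
 * Teardown ordering note for the function above: masking GEN6_PMINTRMSK,
 * disabling the irq and calling intel_synchronize_irq() ensure no new RPS
 * work can be queued before cancel_work_sync() flushes the worker, after
 * which the IIR can be reset without racing against either.
 */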

void gen9_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(gt->uncore->rpm);

	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
	spin_unlock_irq(&gt->irq_lock);
}

void gen9_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(gt->uncore->rpm);

	spin_lock_irq(&gt->irq_lock);
	if (!guc->interrupts.enabled) {
		WARN_ON_ONCE(intel_uncore_read(gt->uncore,
					       gen6_pm_iir(gt->i915)) &
			     gt->pm_guc_events);
		guc->interrupts.enabled = true;
		gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
	}
	spin_unlock_irq(&gt->irq_lock);
}

void gen9_disable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(gt->uncore->rpm);

	spin_lock_irq(&gt->irq_lock);
	guc->interrupts.enabled = false;

	gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);

	spin_unlock_irq(&gt->irq_lock);
	intel_synchronize_irq(gt->i915);

	gen9_reset_guc_interrupts(guc);
}

void gen11_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);
	gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
	spin_unlock_irq(&gt->irq_lock);
}

void gen11_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);
	if (!guc->interrupts.enabled) {
		u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);

		WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
		intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, events);
		intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~events);
		guc->interrupts.enabled = true;
	}
	spin_unlock_irq(&gt->irq_lock);
}

void gen11_disable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);
	guc->interrupts.enabled = false;

	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);

	spin_unlock_irq(&gt->irq_lock);
	intel_synchronize_irq(gt->i915);

	gen11_reset_guc_interrupts(guc);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;
	u32 old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 u32 interrupt_mask,
			 u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  u32 interrupt_mask,
				  u32 enabled_irq_mask)
{
	u32 sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (INTEL_GEN(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		  status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		  pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}
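
/*
 * Layout note (informal): in the PIPESTAT registers the status bits live
 * in the low half and each matching enable bit sits exactly 16 bits
 * higher, which is why the common case above is simply
 *
 *	enable_mask = status_mask << 16;
 *
 * with the VLV/CHV sprite flip-done and PSR bits patched up as the
 * exceptions.
 */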

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle)
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!i915_has_asle(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 i915_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	unsigned long irqflags;

	/*
	 * On i965gm TV output the frame counter only works up to
	 * the point when we enable the TV encoder. After that the
	 * frame counter ceases to work and reads zero. We need a
	 * vblank wait before enabling the TV encoder and so we
	 * have to enable vblank interrupts while the frame counter
	 * is still in a working state. However the core vblank code
	 * does not like us returning non-zero frame counter values
	 * when we've told it that we don't have a working frame
	 * counter. Thus we must stop non-zero values leaking out.
	 */
	if (!vblank->max_vblank_count)
		return 0;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ_FW(low_frame);
		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
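
/*
 * Sketch of the cook-up above with invented numbers: high1 = 0x12 and
 * low = 0x34 give a hw frame counter of 0x1234. The hw counter only
 * increments at the start of active, but the vblank counter should tick
 * at vblank start; if the pixel counter shows we are already past
 * vbl_start, the increment is still pending, so 0x1235 is reported.
 */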

u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/*
 * On certain encoders on certain platforms, pipe
 * scanline register will not work to get the scanline,
 * since the timings are driven from the PORT or issues
 * with scanline register updates.
 * This function will use Framestamp and current
 * timestamp registers to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads. We make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);

		scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
					clock), 1000 * htotal);
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}
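
/*
 * Informal unit check for the scanline calculation above, assuming the
 * timestamp registers tick in microseconds: us * (crtc_clock in kHz) /
 * 1000 yields pixels, and dividing by htotal converts pixels into whole
 * scanlines since the last vblank timestamp; adding vblank_start and
 * wrapping at vtotal turns that into an absolute scanline number.
 */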

/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return -1;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN(dev_priv, 2))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank.  So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem.  We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index,
			      bool in_vblank_irq, int *vpos, int *hpos,
			      ktime_t *stime, ktime_t *etime,
			      const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(drm_crtc_from_index(dev, index));
	enum pipe pipe = crtc->pipe;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;
	bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
		IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
		mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (use_scanline_counter) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (use_scanline_counter) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	intel_uncore_write16(uncore,
			     MEMINTRSTS,
			     intel_uncore_read(uncore, MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
	busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
	max_avg = intel_uncore_read(uncore, RCBMAXAVG);
	min_avg = intel_uncore_read(uncore, RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev_priv, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);
}
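
/*
 * Observation on the arithmetic above: on Ironlake a numerically *lower*
 * ips delay means a higher GPU frequency, so the busy leg decrements
 * cur_delay towards max_delay and the idle leg increments it towards
 * min_delay, each clamped at its respective limit.
 */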

static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->ktime = ktime_get_raw();
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	const struct intel_rps_ei *prev = &rps->ei;
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);

	if (prev->ktime) {
		u64 time, c0;
		u32 render, media;

		time = ktime_us_delta(now.ktime, prev->ktime);

		time *= dev_priv->czclk_freq;

		/* Workload can be split between render + media,
		 * e.g. SwapBuffers being blitted in X after being rendered in
		 * mesa. To account for this we need to combine both engines
		 * into our activity counter.
		 */
		render = now.render_c0 - prev->render_c0;
		media = now.media_c0 - prev->media_c0;
		c0 = max(render, media);
		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */

		if (c0 > time * rps->power.up_threshold)
			events = GEN6_PM_RP_UP_THRESHOLD;
		else if (c0 < time * rps->power.down_threshold)
			events = GEN6_PM_RP_DOWN_THRESHOLD;
	}

	rps->ei = now;
	return events;
}
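
/*
 * Informal unit check for vlv_wa_c0_ei(): both sides of the threshold
 * comparison are scaled into the same domain - time is us * czclk_freq
 * and c0 is busy counts * 1000 * 100 << 8 (assuming the C0 residency
 * counters tick once per 256 czclk cycles), so the comparison amounts
 * to "busy percentage vs. up/down threshold" without any division in
 * the interrupt path.
 */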

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, gt_pm.rps.work);
	struct intel_gt *gt = &dev_priv->gt;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	bool client_boost = false;
	int new_delay, adj, min, max;
	u32 pm_iir = 0;

	spin_lock_irq(&gt->irq_lock);
	if (rps->interrupts_enabled) {
		pm_iir = fetch_and_zero(&rps->pm_iir);
		client_boost = atomic_read(&rps->num_waiters);
	}
	spin_unlock_irq(&gt->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		goto out;

	mutex_lock(&rps->lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = rps->last_adj;
	new_delay = rps->cur_freq;
	min = rps->min_freq_softlimit;
	max = rps->max_freq_softlimit;
	if (client_boost)
		max = rps->max_freq;
	if (client_boost && new_delay < rps->boost_freq) {
		new_delay = rps->boost_freq;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;

		if (new_delay >= rps->max_freq_softlimit)
			adj = 0;
	} else if (client_boost) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (rps->cur_freq > rps->efficient_freq)
			new_delay = rps->efficient_freq;
		else if (rps->cur_freq > rps->min_freq_softlimit)
			new_delay = rps->min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;

		if (new_delay <= rps->min_freq_softlimit)
			adj = 0;
	} else { /* unknown event */
		adj = 0;
	}

	rps->last_adj = adj;

	/*
	 * Limit deboosting and boosting to keep ourselves at the extremes
	 * when in the respective power modes (i.e. slowly decrease frequencies
	 * while in the HIGH_POWER zone and slowly increase frequencies while
	 * in the LOW_POWER zone). On idle, we will hit the timeout and drop
	 * to the next level quickly, and conversely if busy we expect to
	 * hit a waitboost and rapidly switch into max power.
	 */
	if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
	    (adj > 0 && rps->power.mode == LOW_POWER))
		rps->last_adj = 0;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	if (intel_set_rps(dev_priv, new_delay)) {
		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
		rps->last_adj = 0;
	}

	mutex_unlock(&rps->lock);

out:
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	spin_lock_irq(&gt->irq_lock);
	if (rps->interrupts_enabled)
		gen6_gt_pm_unmask_irq(gt, dev_priv->pm_rps_events);
	spin_unlock_irq(&gt->irq_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	struct intel_gt *gt = &dev_priv->gt;
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&gt->irq_lock);
	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&gt->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_C:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
	case HPD_PORT_D:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
	case HPD_PORT_E:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
	case HPD_PORT_F:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
	default:
		return false;
	}
}
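
/*
 * The *_long_detect() helpers in this block all share one shape: given an
 * HPD pin and the latched hotplug control register value, report whether
 * the pulse was "long" (a plug/unplug event) rather than "short" (a sink
 * IRQ). They are handed to intel_get_hpd_pins() below as its
 * long_pulse_detect_func callback.
 */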

static bool gen12_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_D:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
	case HPD_PORT_E:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
	case HPD_PORT_F:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
	case HPD_PORT_G:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
	case HPD_PORT_H:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC5);
	case HPD_PORT_I:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC6);
	default:
		return false;
	}
}

static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_A);
	case HPD_PORT_B:
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_B);
	case HPD_PORT_C:
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_C);
	default:
		return false;
	}
}

static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_C:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
	case HPD_PORT_D:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
	case HPD_PORT_E:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
	case HPD_PORT_F:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
	default:
		return false;
	}
}

static bool tgp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_D:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
	case HPD_PORT_E:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
	case HPD_PORT_F:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
	case HPD_PORT_G:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
	case HPD_PORT_H:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC5);
	case HPD_PORT_I:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC6);
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
			       u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
{
	enum hpd_pin pin;

	BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);

	for_each_hpd_pin(pin) {
		if ((hpd[pin] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(pin);

		if (long_pulse_detect(pin, dig_hotplug_reg))
			*long_mask |= BIT(pin);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}
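
/*
 * Illustrative call pattern for the accumulator above (hand-written, not
 * lifted from a specific caller): the masks are zeroed once and can then
 * be fed through several register/detect-function pairs, e.g.
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
 *			   ddi_hotplug_trigger, dig_hotplug_reg,
 *			   hpd_icp, icp_ddi_port_hotplug_long_detect);
 *	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
 *			   tc_hotplug_trigger, tc_dig_hotplug_reg,
 *			   hpd_icp, icp_tc_port_hotplug_long_detect);
 */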

static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
{
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

	trace_intel_pipe_crc(crtc, crcs);

	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	drm_crtc_add_crc_entry(&crtc->base, true,
				drm_crtc_accurate_vblank_count(&crtc->base),
				crcs);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
#endif

static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 res1, res2;

	if (INTEL_GEN(dev_priv) >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_rps *rps = &i915->gt_pm.rps;
	const u32 events = i915->pm_rps_events & pm_iir;

	lockdep_assert_held(&gt->irq_lock);

	if (unlikely(!events))
		return;

	gen6_gt_pm_mask_irq(gt, events);

	if (!rps->interrupts_enabled)
		return;

	rps->pm_iir |= events;
	schedule_work(&rps->work);
}

void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	struct intel_gt *gt = &dev_priv->gt;

	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&gt->irq_lock);
		gen6_gt_pm_mask_irq(gt, pm_iir & dev_priv->pm_rps_events);
		if (rps->interrupts_enabled) {
			rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
			schedule_work(&rps->work);
		}
		spin_unlock(&gt->irq_lock);
	}

	if (INTEL_GEN(dev_priv) >= 8)
		return;

	if (pm_iir & PM_VEBOX_USER_INTERRUPT)
		intel_engine_breadcrumbs_irq(dev_priv->engine[VECS0]);

	if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
		DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
}

static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(PIPESTAT(pipe),
			   PIPESTAT_INT_STATUS_MASK |
			   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}

static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

1707
	for_each_pipe(dev_priv, pipe) {
1708
		i915_reg_t reg;
1709
		u32 status_mask, enable_mask, iir_bit = 0;
1710

1711 1712 1713 1714 1715 1716 1717
		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */
1718 1719

		/* fifo underruns are filterered in the underrun handler. */
1720
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;
1721 1722

		switch (pipe) {
1723
		default:
1724 1725 1726 1727 1728 1729
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
1730 1731 1732
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
1733 1734
		}
		if (iir & iir_bit)
1735
			status_mask |= dev_priv->pipestat_irq_mask[pipe];
1736

1737
		if (!status_mask)
1738 1739 1740
			continue;

		reg = PIPESTAT(pipe);
1741 1742
		pipe_stats[pipe] = I915_READ(reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
1743 1744 1745

		/*
		 * Clear the PIPE*STAT regs before the IIR
1746 1747 1748 1749 1750 1751
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
1752
		 */
1753 1754 1755 1756
		if (pipe_stats[pipe]) {
			I915_WRITE(reg, pipe_stats[pipe]);
			I915_WRITE(reg, enable_mask);
		}
1757
	}
1758
	spin_unlock(&dev_priv->irq_lock);
1759 1760
}

static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}
}

static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);
}

static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = 0, hotplug_status_mask;
	int i;

	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
	else
		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;

	/*
	 * We absolutely have to clear all the pending interrupt
	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
	 * interrupt bit won't have an edge, and the i965/g4x
	 * edge triggered IIR will not notice that an interrupt
	 * is still pending. We can't use PORT_HOTPLUG_EN to
	 * guarantee the edge as the act of toggling the enable
	 * bits can itself generate a new hotplug interrupt :(
	 */
	for (i = 0; i < 10; i++) {
		u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;

		if (tmp == 0)
			return hotplug_status;

		hotplug_status |= tmp;
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
	}

	WARN_ONCE(1,
		  "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
		  I915_READ(PORT_HOTPLUG_STAT));

	return hotplug_status;
}

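/*
 * Decode the hotplug status bits acked above into HPD pin events and hand
 * them to the common HPD machinery; G4X/VLV/CHV additionally carry the DP
 * AUX completion interrupt in the same register.
 */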
static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (hotplug_trigger) {
			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
					   hotplug_trigger, hotplug_trigger,
					   hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev_priv);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (hotplug_trigger) {
			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
					   hotplug_trigger, hotplug_trigger,
					   hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}
	}
}

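/*
 * VLV top half: sample the GT, PM and display IIRs in a single pass with
 * the master interrupt and VLV_IER cleared, ack everything, then process.
 * See the "Theory on interrupt generation" comment below for why the
 * master/IER dance is required.
 */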
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);
		iir = I915_READ(VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		I915_WRITE(VLV_MASTER_IER, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		I915_WRITE(VLV_IER, ier);
		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

		if (gt_iir)
			gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 master_ctl, iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 gt_iir[4];
		u32 ier = 0;

		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		I915_WRITE(GEN8_MASTER_IRQ, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		I915_WRITE(VLV_IER, ier);
		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);

		gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/*
	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
	 * unless we touch the hotplug register, even if hotplug_trigger is
	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
	 * errors.
	 */
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	if (!hotplug_trigger) {
		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
			PORTD_HOTPLUG_STATUS_MASK |
			PORTC_HOTPLUG_STATUS_MASK |
			PORTB_HOTPLUG_STATUS_MASK;
		dig_hotplug_reg &= ~mask;
	}

	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
	if (!hotplug_trigger)
		return;

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   pch_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
}

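/*
 * GEN7_ERR_INT multiplexes CPU-side poison, per-pipe FIFO underrun and
 * pipe CRC done events on IVB/HSW; decode and dispatch them here.
 */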
static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev_priv))
				ivb_pipe_crc_irq_handler(dev_priv, pipe);
			else
				hsw_pipe_crc_irq_handler(dev_priv, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

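/* South (PCH) error interrupt: poison and per-transcoder FIFO underruns. */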
static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
	u32 serr_int = I915_READ(SERR_INT);
	enum pipe pipe;

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	for_each_pipe(dev_priv, pipe)
		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev_priv);
}

static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 ddi_hotplug_trigger, tc_hotplug_trigger;
	u32 pin_mask = 0, long_mask = 0;
	bool (*tc_port_hotplug_long_detect)(enum hpd_pin pin, u32 val);
	const u32 *pins;

	if (HAS_PCH_TGP(dev_priv)) {
		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
		tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP;
		tc_port_hotplug_long_detect = tgp_tc_port_hotplug_long_detect;
		pins = hpd_tgp;
	} else if (HAS_PCH_JSP(dev_priv)) {
		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
		tc_hotplug_trigger = 0;
		pins = hpd_tgp;
	} else if (HAS_PCH_MCC(dev_priv)) {
		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
		tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_ICP(PORT_TC1);
		tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
		pins = hpd_icp;
	} else {
		WARN(!HAS_PCH_ICP(dev_priv),
		     "Unrecognized PCH type 0x%x\n", INTEL_PCH_TYPE(dev_priv));

		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
		tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
		tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
		pins = hpd_icp;
	}

	if (ddi_hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
		I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   ddi_hotplug_trigger,
				   dig_hotplug_reg, pins,
				   icp_ddi_port_hotplug_long_detect);
	}

	if (tc_hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
		I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   tc_hotplug_trigger,
				   dig_hotplug_reg, pins,
				   tc_port_hotplug_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_ICP)
		gmbus_irq_handler(dev_priv);
}

static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
	u32 pin_mask = 0, long_mask = 0;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug_trigger, dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug_long_detect);
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug2_trigger, dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug2_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);
}

static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   ilk_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev_priv);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev_priv))
			cpt_irq_handler(dev_priv, pch_iir);
		else
			ibx_irq_handler(dev_priv, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev_priv);
}

static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev_priv);

	if (de_iir & DE_EDP_PSR_INT_HSW) {
		u32 psr_iir = I915_READ(EDP_PSR_IIR);

		intel_psr_irq_handler(dev_priv, psr_iir);
		I915_WRITE(EDP_PSR_IIR, psr_iir);
	}

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev_priv);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
			drm_handle_vblank(&dev_priv->drm, pipe);
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev_priv, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev_priv)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
		if (INTEL_GEN(dev_priv) >= 6)
			gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
		else
			gen5_gt_irq_handler(&dev_priv->gt, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
		if (INTEL_GEN(dev_priv) >= 7)
			ivb_display_irq_handler(dev_priv, de_iir);
		else
			ilk_display_irq_handler(dev_priv, de_iir);
	}

	if (INTEL_GEN(dev_priv) >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);

		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, pm_iir);
		}
	}

	I915_WRITE(DEIER, de_ier);
	if (!HAS_PCH_NOP(dev_priv))
		I915_WRITE(SDEIER, sde_ier);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   bxt_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
	u32 pin_mask = 0, long_mask = 0;
	u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
	u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
	long_pulse_detect_func long_pulse_detect;
	const u32 *hpd;

	if (INTEL_GEN(dev_priv) >= 12) {
		long_pulse_detect = gen12_port_hotplug_long_detect;
		hpd = hpd_gen12;
	} else {
		long_pulse_detect = gen11_port_hotplug_long_detect;
		hpd = hpd_gen11;
	}

	if (trigger_tc) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
		I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
				   dig_hotplug_reg, hpd, long_pulse_detect);
	}

	if (trigger_tbt) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
		I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
				   dig_hotplug_reg, hpd, long_pulse_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
	else
		DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
}

static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
{
	u32 mask;

	if (INTEL_GEN(dev_priv) >= 12)
		/* TODO: Add AUX entries for USBC */
		return TGL_DE_PORT_AUX_DDIA |
			TGL_DE_PORT_AUX_DDIB |
			TGL_DE_PORT_AUX_DDIC;

	mask = GEN8_AUX_CHANNEL_A;
	if (INTEL_GEN(dev_priv) >= 9)
		mask |= GEN9_AUX_CHANNEL_B |
			GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;

	if (IS_CNL_WITH_PORT_F(dev_priv) || IS_GEN(dev_priv, 11))
		mask |= CNL_AUX_CHANNEL_F;

	if (IS_GEN(dev_priv, 11))
		mask |= ICL_AUX_CHANNEL_E;

	return mask;
}

static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
	else
		return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
}

static void
gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
	bool found = false;

	if (iir & GEN8_DE_MISC_GSE) {
		intel_opregion_asle_intr(dev_priv);
		found = true;
	}

	if (iir & GEN8_DE_EDP_PSR) {
		u32 psr_iir;
		i915_reg_t iir_reg;

		if (INTEL_GEN(dev_priv) >= 12)
			iir_reg = TRANS_PSR_IIR(dev_priv->psr.transcoder);
		else
			iir_reg = EDP_PSR_IIR;

		psr_iir = I915_READ(iir_reg);
		I915_WRITE(iir_reg, psr_iir);

		if (psr_iir)
			found = true;

		intel_psr_irq_handler(dev_priv, psr_iir);
	}

	if (!found)
		DRM_ERROR("Unexpected DE Misc interrupt\n");
}

static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;
	u32 iir;
	enum pipe pipe;

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = I915_READ(GEN8_DE_MISC_IIR);
		if (iir) {
			I915_WRITE(GEN8_DE_MISC_IIR, iir);
			ret = IRQ_HANDLED;
			gen8_de_misc_irq_handler(dev_priv, iir);
		} else {
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
		}
	}

	if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
		iir = I915_READ(GEN11_DE_HPD_IIR);
		if (iir) {
			I915_WRITE(GEN11_DE_HPD_IIR, iir);
			ret = IRQ_HANDLED;
			gen11_hpd_irq_handler(dev_priv, iir);
		} else {
			DRM_ERROR("The master control interrupt lied, (DE HPD)!\n");
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = I915_READ(GEN8_DE_PORT_IIR);
		if (iir) {
			u32 tmp_mask;
			bool found = false;

			I915_WRITE(GEN8_DE_PORT_IIR, iir);
			ret = IRQ_HANDLED;

			if (iir & gen8_de_port_aux_mask(dev_priv)) {
				dp_aux_irq_handler(dev_priv);
				found = true;
			}

			if (IS_GEN9_LP(dev_priv)) {
				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
				if (tmp_mask) {
					bxt_hpd_irq_handler(dev_priv, tmp_mask,
							    hpd_bxt);
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
				if (tmp_mask) {
					ilk_hpd_irq_handler(dev_priv,
							    tmp_mask, hpd_bdw);
					found = true;
				}
			}

			if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
				gmbus_irq_handler(dev_priv);
				found = true;
			}

			if (!found)
				DRM_ERROR("Unexpected DE Port interrupt\n");
		} else {
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
		}
	}

	for_each_pipe(dev_priv, pipe) {
		u32 fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
			continue;
		}

		ret = IRQ_HANDLED;
		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev_priv, pipe);

		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
		if (fault_errors)
			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
				  pipe_name(pipe),
				  fault_errors);
	}

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		iir = I915_READ(SDEIIR);
		if (iir) {
			I915_WRITE(SDEIIR, iir);
			ret = IRQ_HANDLED;

			if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
				icp_irq_handler(dev_priv, iir);
			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
				spt_irq_handler(dev_priv, iir);
			else
				cpt_irq_handler(dev_priv, iir);
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
		}
	}

	return ret;
}

static inline u32 gen8_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN8_MASTER_IRQ);
}

static inline void gen8_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
}

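/*
 * gen8 top half: with the master interrupt disabled, ack the GT IIRs,
 * handle the display sources with runtime-PM wakeref asserts disabled
 * (IRQs are synced during runtime_suspend), then re-enable the master
 * bit and process the GT events.
 */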
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	void __iomem * const regs = dev_priv->uncore.regs;
	u32 master_ctl;
	u32 gt_iir[4];

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	master_ctl = gen8_master_intr_disable(regs);
	if (!master_ctl) {
		gen8_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, clear, then process each source of interrupt */
	gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & ~GEN8_GT_IRQS) {
		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
		gen8_de_irq_handler(dev_priv, master_ctl);
		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
	}

	gen8_master_intr_enable(regs);

	gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir);

	return IRQ_HANDLED;
}

static u32
gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 iir;

	if (!(master_ctl & GEN11_GU_MISC_IRQ))
		return 0;

	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
	if (likely(iir))
		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);

	return iir;
}

static void
gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
{
	if (iir & GEN11_GU_MISC_GSE)
		intel_opregion_asle_intr(gt->i915);
}

static inline u32 gen11_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
}

static inline void gen11_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}

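/*
 * gen11 top half mirrors gen8: disable the master interrupt, handle GT
 * directly, route the display bits through the gen8 DE handler via
 * GEN11_DISPLAY_INT_CTL, ack GU misc, re-enable the master bit and then
 * run the GU misc bottom half.
 */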
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
	struct drm_i915_private * const i915 = arg;
	void __iomem * const regs = i915->uncore.regs;
	struct intel_gt *gt = &i915->gt;
	u32 master_ctl;
	u32 gu_misc_iir;

	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_ctl = gen11_master_intr_disable(regs);
	if (!master_ctl) {
		gen11_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, clear, then process each source of interrupt. */
	gen11_gt_irq_handler(gt, master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & GEN11_DISPLAY_IRQ) {
		const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);

		disable_rpm_wakeref_asserts(&i915->runtime_pm);
		/*
		 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
		 * for the display related bits.
		 */
		gen8_de_irq_handler(i915, disp_ctl);
		enable_rpm_wakeref_asserts(&i915->runtime_pm);
	}

	gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);

	gen11_master_intr_enable(regs);

	gen11_gu_misc_irq_handler(gt, gu_misc_iir);

	return IRQ_HANDLED;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
int i8xx_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

int i915gm_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);

	/*
	 * Vblank interrupts fail to wake the device up from C2+.
	 * Disabling render clock gating during C-states avoids
	 * the problem. There is a small power cost so we do this
	 * only when vblank interrupts are actually enabled.
	 */
	if (dev_priv->vblank_enabled++ == 0)
		I915_WRITE(SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));

	return i8xx_enable_vblank(crtc);
}

int i965_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

int ilk_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* Even though there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated.
	 */
	if (HAS_PSR(dev_priv))
		drm_crtc_vblank_restore(crtc);

	return 0;
}

int bdw_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* Even if there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated, so check only for PSR.
	 */
	if (HAS_PSR(dev_priv))
		drm_crtc_vblank_restore(crtc);

	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
void i8xx_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

void i915gm_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);

	i8xx_disable_vblank(crtc);

	if (--dev_priv->vblank_enabled == 0)
		I915_WRITE(SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}

void i965_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

void ilk_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

void bdw_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (HAS_PCH_NOP(dev_priv))
		return;

	GEN3_IRQ_RESET(uncore, SDE);

	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		I915_WRITE(SERR_INT, 0xffffffff);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * This function needs to be called before interrupts are enabled.
 */
static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_NOP(dev_priv))
		return;

	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}

static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (IS_CHERRYVIEW(dev_priv))
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);

	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
	intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, VLV_);
	dev_priv->irq_mask = ~0u;
}

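/*
 * Program the PIPESTAT enables and the display-side VLV_IER/VLV_IMR for
 * VLV/CHV; CHV additionally takes pipe C events and its LPE audio
 * interrupt.
 */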
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 pipestat_mask;
	u32 enable_mask;
	enum pipe pipe;

	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_LPE_PIPE_A_INTERRUPT |
		I915_LPE_PIPE_B_INTERRUPT;

	if (IS_CHERRYVIEW(dev_priv))
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
			I915_LPE_PIPE_C_INTERRUPT;

	WARN_ON(dev_priv->irq_mask != ~0u);

	dev_priv->irq_mask = ~enable_mask;

	GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
}

/* drm_dma.h hooks
*/
static void ironlake_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	GEN3_IRQ_RESET(uncore, DE);
	if (IS_GEN(dev_priv, 7))
		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);

	if (IS_HASWELL(dev_priv)) {
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}

	gen5_gt_irq_reset(&dev_priv->gt);

	ibx_irq_reset(dev_priv);
}

static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
	I915_WRITE(VLV_MASTER_IER, 0);
	POSTING_READ(VLV_MASTER_IER);

	gen5_gt_irq_reset(&dev_priv->gt);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;

	gen8_master_intr_disable(dev_priv->uncore.regs);

	gen8_gt_irq_reset(&dev_priv->gt);

	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_reset(dev_priv);
}

static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;

	gen11_master_intr_disable(dev_priv->uncore.regs);

	gen11_gt_irq_reset(&dev_priv->gt);

	intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);

	if (INTEL_GEN(dev_priv) >= 12) {
		enum transcoder trans;

		for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
			intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
		}
	} else {
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
	GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		GEN3_IRQ_RESET(uncore, SDE);
}

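/**
 * gen8_irq_power_well_post_enable - enable pipe interrupts after a display
 * power well is enabled
 * @dev_priv: i915 device instance
 * @pipe_mask: mask of pipes whose interrupts should be (re)initialized
 *
 * The per-pipe interrupt registers lose their state while the containing
 * power well is off, so reprogram IMR/IER for the affected pipes once power
 * comes back.
 */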
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
				  dev_priv->de_irq_mask[pipe],
				  ~dev_priv->de_irq_mask[pipe] | extra_ier);

	spin_unlock_irq(&dev_priv->irq_lock);
}

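/**
 * gen8_irq_power_well_pre_disable - disable pipe interrupts before a display
 * power well is disabled
 * @dev_priv: i915 device instance
 * @pipe_mask: mask of pipes whose interrupts should be disabled
 *
 * Reset the per-pipe interrupt registers and wait for any in-flight display
 * irq handling to finish before the power well is switched off.
 */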
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);
}

static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(&dev_priv->gt);

	GEN3_IRQ_RESET(uncore, GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

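/*
 * Collect the HPD IRQ bits for all encoders whose hotplug detection is
 * currently enabled, using the platform's hpd_pin -> IRQ bit table.
 */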
static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}

static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on LPT+.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTB_PULSE_DURATION_MASK |
		     PORTC_PULSE_DURATION_MASK |
		     PORTD_PULSE_DURATION_MASK);
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	/*
	 * When CPU and PCH are on the same package, port A
	 * HPD must be enabled in both north and south.
	 */
	if (HAS_PCH_LPT_LP(dev_priv))
		hotplug |= PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	if (HAS_PCH_IBX(dev_priv)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	ibx_hpd_detection_setup(dev_priv);
}

static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv,
				    u32 ddi_hotplug_enable_mask,
				    u32 tc_hotplug_enable_mask)
{
	u32 hotplug;

	hotplug = I915_READ(SHOTPLUG_CTL_DDI);
	hotplug |= ddi_hotplug_enable_mask;
	I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);

	if (tc_hotplug_enable_mask) {
		hotplug = I915_READ(SHOTPLUG_CTL_TC);
		hotplug |= tc_hotplug_enable_mask;
		I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
	}
}

static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv,
			      u32 sde_ddi_mask, u32 sde_tc_mask,
			      u32 ddi_enable_mask, u32 tc_enable_mask,
			      const u32 *pins)
{
	u32 hotplug_irqs, enabled_irqs;

	hotplug_irqs = sde_ddi_mask | sde_tc_mask;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, pins);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	icp_hpd_detection_setup(dev_priv, ddi_enable_mask, tc_enable_mask);
}

/*
 * EHL doesn't need most of gen11_hpd_irq_setup, it's handling only the
 * equivalent of SDE.
 */
static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	icp_hpd_irq_setup(dev_priv,
			  SDE_DDI_MASK_ICP, SDE_TC_HOTPLUG_ICP(PORT_TC1),
			  ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE(PORT_TC1),
			  hpd_icp);
}

/*
 * JSP behaves exactly the same as MCC above except that port C is mapped to
 * the DDI-C pins instead of the TC1 pins.  This means we should follow TGP's
 * masks & tables rather than ICP's masks & tables.
 */
static void jsp_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	icp_hpd_irq_setup(dev_priv,
			  SDE_DDI_MASK_TGP, 0,
			  TGP_DDI_HPD_ENABLE_MASK, 0,
			  hpd_tgp);
}

static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
	I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);

	hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
	I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
}

static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;
	const u32 *hpd;
	u32 val;

	hpd = INTEL_GEN(dev_priv) >= 12 ? hpd_gen12 : hpd_gen11;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd);
	hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;

	val = I915_READ(GEN11_DE_HPD_IMR);
	val &= ~hotplug_irqs;
	I915_WRITE(GEN11_DE_HPD_IMR, val);
	POSTING_READ(GEN11_DE_HPD_IMR);

	gen11_hpd_detection_setup(dev_priv);

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
		icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_TGP, SDE_TC_MASK_TGP,
				  TGP_DDI_HPD_ENABLE_MASK,
				  TGP_TC_HPD_ENABLE_MASK, hpd_tgp);
	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_ICP, SDE_TC_MASK_ICP,
				  ICP_DDI_HPD_ENABLE_MASK,
				  ICP_TC_HPD_ENABLE_MASK, hpd_icp);
}

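/*
 * SPT+ PCH: apply the CNP chicken-bit workaround before enabling digital
 * hotplug detection on ports A-D (PCH_PORT_HOTPLUG) and port E
 * (PCH_PORT_HOTPLUG2).
 */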
static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 val, hotplug;

	/* Display WA #1179 WaHardHangonHotPlug: cnp */
	if (HAS_PCH_CNP(dev_priv)) {
		val = I915_READ(SOUTH_CHICKEN1);
		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
		val |= CHASSIS_CLK_REQ_DURATION(0xf);
		I915_WRITE(SOUTH_CHICKEN1, val);
	}

	/* Enable digital hotplug on the PCH */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE |
		   PORTD_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);

	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
	hotplug |= PORTE_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}

static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	spt_hpd_detection_setup(dev_priv);
}

static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the DisplayPort spec).
	 * The pulse duration bits are reserved on HSW+.
	 */
	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
		   DIGITAL_PORTA_PULSE_DURATION_2ms;
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
}

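/*
 * Unmask the CPU-side DP-A hotplug interrupt for the register layout at
 * hand (BDW+, IVB/HSW, or ILK/SNB), then set up detection on both the
 * CPU and the PCH side.
 */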
static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	if (INTEL_GEN(dev_priv) >= 8) {
		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);

		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else if (INTEL_GEN(dev_priv) >= 7) {
		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else {
		hotplug_irqs = DE_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	}

	ilk_hpd_detection_setup(dev_priv);

	ibx_hpd_irq_setup(dev_priv);
}

static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
				      u32 enabled_irqs)
{
	u32 hotplug;

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE;

	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
		      hotplug, enabled_irqs);
	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;

	/*
	 * For BXT the invert bit has to be set based on the AOB design
	 * for the HPD detection logic; update it based on VBT fields.
	 */
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
		hotplug |= BXT_DDIA_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
		hotplug |= BXT_DDIB_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
		hotplug |= BXT_DDIC_HPD_INVERT;

	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	__bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
}

static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;

	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);

	__bxt_hpd_detection_setup(dev_priv, enabled_irqs);
}

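/*
 * Pick the always-on south display interrupts (GMBUS, AUX, poison) for
 * the PCH generation at hand and unmask them, then enable hotplug
 * detection in the matching flavour.
 */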
static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	u32 mask;

	if (HAS_PCH_NOP(dev_priv))
		return;

	if (HAS_PCH_IBX(dev_priv))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
	else
		mask = SDE_GMBUS_CPT;

	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);

	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
	    HAS_PCH_LPT(dev_priv))
		ibx_hpd_detection_setup(dev_priv);
	else
		spt_hpd_detection_setup(dev_priv);
}

static void ironlake_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 display_mask, extra_mask;

	if (INTEL_GEN(dev_priv) >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_DP_A_HOTPLUG);
	}

	if (IS_HASWELL(dev_priv)) {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
		display_mask |= DE_EDP_PSR_INT_HSW;
	}

	dev_priv->irq_mask = ~display_mask;

	ibx_irq_pre_postinstall(dev_priv);

	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
		      display_mask | extra_mask);

	gen5_gt_irq_postinstall(&dev_priv->gt);

	ilk_hpd_detection_setup(dev_priv);

	ibx_irq_postinstall(dev_priv);

	if (IS_IRONLAKE_M(dev_priv)) {
		/*
		 * Enable PCU event interrupts
		 *
		 * Spinlocking is not required here for correctness since
		 * interrupt setup is guaranteed to run in single-threaded
		 * context. But we need it to make the assert_spin_locked
		 * happy.
		 */
		spin_lock_irq(&dev_priv->irq_lock);
		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}
}

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv)) {
		vlv_display_irq_reset(dev_priv);
		vlv_display_irq_postinstall(dev_priv);
	}
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		vlv_display_irq_reset(dev_priv);
}

static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen5_gt_irq_postinstall(&dev_priv->gt);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(VLV_MASTER_IER);
}

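/*
 * Build the gen8+ display engine interrupt masks: per-pipe events
 * (restricted to powered-up pipes), port/AUX events, misc events and, on
 * gen11+, the dedicated DE HPD block. The PSR IIRs are asserted to be
 * clear first, per-transcoder on gen12+.
 */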
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	u32 de_pipe_enables;
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
	enum pipe pipe;

	if (INTEL_GEN(dev_priv) <= 10)
		de_misc_masked |= GEN8_DE_MISC_GSE;

	if (INTEL_GEN(dev_priv) >= 9) {
		de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
				  GEN9_AUX_CHANNEL_D;
		if (IS_GEN9_LP(dev_priv))
			de_port_masked |= BXT_DE_PORT_GMBUS;
	} else {
		de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	}

	if (INTEL_GEN(dev_priv) >= 11)
		de_port_masked |= ICL_AUX_CHANNEL_E;

	if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
		de_port_masked |= CNL_AUX_CHANNEL_F;

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

	de_port_enables = de_port_masked;
	if (IS_GEN9_LP(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;

	if (INTEL_GEN(dev_priv) >= 12) {
		enum transcoder trans;

		for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
		}
	} else {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
	}

	for_each_pipe(dev_priv, pipe) {
		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;

		if (intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);
	}

	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);

	if (INTEL_GEN(dev_priv) >= 11) {
		u32 de_hpd_masked = 0;
		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
				     GEN11_DE_TBT_HOTPLUG_MASK;

		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
			      de_hpd_enables);
		gen11_hpd_detection_setup(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		bxt_hpd_detection_setup(dev_priv);
	} else if (IS_BROADWELL(dev_priv)) {
		ilk_hpd_detection_setup(dev_priv);
	}
}

static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_pre_postinstall(dev_priv);

	gen8_gt_irq_postinstall(&dev_priv->gt);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev_priv);

	gen8_master_intr_enable(dev_priv->uncore.regs);
}

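/*
 * ICP-family south display postinstall: enable SDEIER wholesale, unmask
 * GMBUS, and program the hotplug detection masks that match the exact
 * PCH variant (TGP, JSP, MCC or plain ICP).
 */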
static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
	u32 mask = SDE_GMBUS_ICP;

	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);

	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);

	if (HAS_PCH_TGP(dev_priv))
		icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK,
					TGP_TC_HPD_ENABLE_MASK);
	else if (HAS_PCH_JSP(dev_priv))
		icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0);
	else if (HAS_PCH_MCC(dev_priv))
		icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
					ICP_TC_HPD_ENABLE(PORT_TC1));
	else
		icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
					ICP_TC_HPD_ENABLE_MASK);
}

static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);

	gen11_gt_irq_postinstall(&dev_priv->gt);
	gen8_de_irq_postinstall(dev_priv);

	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);

	gen11_master_intr_enable(uncore->regs);
	POSTING_READ(GEN11_GFX_MSTR_IRQ);
}

static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen8_gt_irq_postinstall(&dev_priv->gt);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);
}

static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i9xx_pipestat_irq_reset(dev_priv);

	GEN2_IRQ_RESET(uncore);
}

static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u16 enable_mask;

	intel_uncore_write16(uncore,
			     EMR,
			     ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded; this
	 * is just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);
}

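/*
 * Latch and clear EIR, remembering in @eir_stuck any bits that refuse to
 * clear so the caller can report them after they have been masked off in
 * EMR.
 */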
static void i8xx_error_irq_ack(struct drm_i915_private *i915,
			       u16 *eir, u16 *eir_stuck)
{
	struct intel_uncore *uncore = &i915->uncore;
	u16 emr;

	*eir = intel_uncore_read16(uncore, EIR);

	if (*eir)
		intel_uncore_write16(uncore, EIR, *eir);

	*eir_stuck = intel_uncore_read16(uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_read16(uncore, EMR);
	intel_uncore_write16(uncore, EMR, 0xffff);
	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
}

static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u16 eir, u16 eir_stuck)
{
	DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);

	if (eir_stuck)
		DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);
}

static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	*eir = I915_READ(EIR);

	I915_WRITE(EIR, *eir);

	*eir_stuck = I915_READ(EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = I915_READ(EMR);
	I915_WRITE(EMR, 0xffffffff);
	I915_WRITE(EMR, emr | *eir_stuck);
}

static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u32 eir, u32 eir_stuck)
{
	DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);

	if (eir_stuck)
		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck);
}

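/*
 * Gen2 top-level interrupt handler. Events are acked before they are
 * handled so that a new edge can be latched while we work; the handler
 * services a single GEN2_IIR snapshot per invocation.
 */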
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u16 eir = 0, eir_stuck = 0;
		u16 iir;

		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);

		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
}

static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
			  I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded; this
	 * is just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		I915_WRITE(GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
}

static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;
	u32 error_mask;

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded; this
	 * is just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}

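/*
 * GMCH-platform hotplug setup: program HOTPLUG_EN under irq_lock,
 * leaving the (buggy) TV detection bits alone.
 */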
static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/*
	 * Programming the CRT detection parameters tends to generate a
	 * spurious hotplug event about three seconds later. So just do it
	 * once.
	 */
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		I915_WRITE(GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);

		if (iir & I915_BSD_USER_INTERRUPT)
			intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not setup the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int i;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&rps->work, gen6_pm_rps_work);

	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	/* pre-gen11 the guc irq bits are in the upper 16 bits of the pm reg */
	if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11)
		dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD |
					   GEN6_PM_RP_DOWN_THRESHOLD |
					   GEN6_PM_RP_DOWN_TIMEOUT);

	/* We share the register with other engines */
	if (INTEL_GEN(dev_priv) > 9)
		GEM_WARN_ON(dev_priv->pm_rps_events & 0xffff0000);

	rps->pm_intrmsk_mbz = 0;

	/*
	 * SNB, IVB and HSW can hard hang (and VLV, CHV may) on a looping
	 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_GEN(dev_priv) <= 7)
		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_GEN(dev_priv) >= 8)
		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;

	dev->vblank_disable_immediate = true;

	/*
	 * Most platforms treat the display irq block as an always-on power
	 * domain. vlv/chv can disable it at runtime and need special care to
	 * avoid writing any of the display block registers outside of the
	 * power domain. We defer setting up the display irqs in this case to
	 * the runtime pm.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	/*
	 * If we have MST support, we want to avoid doing short HPD IRQ storm
	 * detection, as short HPD storms will occur as a natural part of
	 * sideband messaging with MST.
	 * On older platforms however, IRQ storms can occur with both long and
	 * short pulses, as seen on some G4x systems.
	 */
	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);

	if (HAS_GMCH(dev_priv)) {
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else {
		if (HAS_PCH_JSP(dev_priv))
			dev_priv->display.hpd_irq_setup = jsp_hpd_irq_setup;
		else if (HAS_PCH_MCC(dev_priv))
			dev_priv->display.hpd_irq_setup = mcc_hpd_irq_setup;
		else if (INTEL_GEN(dev_priv) >= 11)
			dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
		else if (IS_GEN9_LP(dev_priv))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	}
}

/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}

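/*
 * The three helpers below pick the handler/reset/postinstall routines
 * for the running platform: GMCH platforms dispatch on device
 * generation, everything else on the gen8/gen11 boundaries.
 */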
static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			return cherryview_irq_handler;
		else if (IS_VALLEYVIEW(dev_priv))
			return valleyview_irq_handler;
		else if (IS_GEN(dev_priv, 4))
			return i965_irq_handler;
		else if (IS_GEN(dev_priv, 3))
			return i915_irq_handler;
		else
			return i8xx_irq_handler;
	} else {
		if (INTEL_GEN(dev_priv) >= 11)
			return gen11_irq_handler;
		else if (INTEL_GEN(dev_priv) >= 8)
			return gen8_irq_handler;
		else
			return ironlake_irq_handler;
	}
}

static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_reset(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_reset(dev_priv);
		else if (IS_GEN(dev_priv, 4))
			i965_irq_reset(dev_priv);
		else if (IS_GEN(dev_priv, 3))
			i915_irq_reset(dev_priv);
		else
			i8xx_irq_reset(dev_priv);
	} else {
		if (INTEL_GEN(dev_priv) >= 11)
			gen11_irq_reset(dev_priv);
		else if (INTEL_GEN(dev_priv) >= 8)
			gen8_irq_reset(dev_priv);
		else
			ironlake_irq_reset(dev_priv);
	}
}

static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_postinstall(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_postinstall(dev_priv);
		else if (IS_GEN(dev_priv, 4))
			i965_irq_postinstall(dev_priv);
		else if (IS_GEN(dev_priv, 3))
			i915_irq_postinstall(dev_priv);
		else
			i8xx_irq_postinstall(dev_priv);
	} else {
		if (INTEL_GEN(dev_priv) >= 11)
			gen11_irq_postinstall(dev_priv);
		else if (INTEL_GEN(dev_priv) >= 8)
			gen8_irq_postinstall(dev_priv);
		else
			ironlake_irq_postinstall(dev_priv);
	}
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	int irq = dev_priv->drm.pdev->irq;
	int ret;

	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	dev_priv->drm.irq_enabled = true;

	intel_irq_reset(dev_priv);

	ret = request_irq(irq, intel_irq_handler(dev_priv),
			  IRQF_SHARED, DRIVER_NAME, dev_priv);
	if (ret < 0) {
		dev_priv->drm.irq_enabled = false;
		return ret;
	}

	intel_irq_postinstall(dev_priv);

	return ret;
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	int irq = dev_priv->drm.pdev->irq;

	/*
	 * FIXME we can get called twice during driver load
	 * error handling due to intel_modeset_cleanup()
	 * calling us out of sequence. Would be nice if
	 * it didn't do that...
	 */
	if (!dev_priv->drm.irq_enabled)
		return;

	dev_priv->drm.irq_enabled = false;

	intel_irq_reset(dev_priv);

	free_irq(irq, dev_priv);

	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	intel_irq_reset(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
	intel_synchronize_irq(dev_priv);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->runtime_pm.irqs_enabled = true;
	intel_irq_reset(dev_priv);
	intel_irq_postinstall(dev_priv);
}

bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	/*
	 * We only use drm_irq_uninstall() at unload and VT switch, so
	 * this is the only thing we need to check.
	 */
	return dev_priv->runtime_pm.irqs_enabled;
}

void intel_synchronize_irq(struct drm_i915_private *i915)
{
	synchronize_irq(i915->drm.pdev->irq);
}