/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"

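/* How long to poll for a forcewake ack before giving up, in milliseconds. */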
#define FORCEWAKE_ACK_TIMEOUT_MS 2

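/*
 * The __raw_i915_read/write helpers access the MMIO BAR directly, with no
 * forcewake handling, locking or tracing; every other accessor in this file
 * is built on top of them.
 */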
#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)

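/*
 * Touching registers while the device is runtime suspended is a driver bug;
 * warn loudly so it gets caught early.
 */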
static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
	WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
	     "Device suspended\n");
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	u32 gt_thread_status_mask;

	if (IS_HASWELL(dev_priv->dev))
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
	else
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;

	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
}

static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
							int fw_engine)
{
	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE, 1);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:snb */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void __gen7_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
}

static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
							int fw_engine)
{
	u32 forcewake_ack;

	if (IS_HASWELL(dev_priv->dev) || IS_GEN8(dev_priv->dev))
		forcewake_ack = FORCEWAKE_ACK_HSW;
	else
		forcewake_ack = FORCEWAKE_MT_ACK;

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:ivb,hsw */
	if (INTEL_INFO(dev_priv->dev)->gen < 8)
		__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
							int fw_engine)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}

static void __gen7_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
							int fw_engine)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);

	if (IS_GEN7(dev_priv->dev))
		gen6_gt_check_fifodbg(dev_priv);
}

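/*
 * Before posting a write on gen6/gen7 we have to make sure the GT FIFO has
 * a free slot. The return value is nonzero if the FIFO never drained, so
 * the caller knows to go and check GTFIFODBG afterwards.
 */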
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, FIFO will be shared by both SW and HW.
	 * So, we need to read the FREE_ENTRIES every time */
	if (IS_VALLEYVIEW(dev_priv->dev))
		dev_priv->uncore.fifo_count =
			__raw_i915_read32(dev_priv, GTFIFOCTL) &
						GT_FIFO_FREE_ENTRIES_MASK;

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}

static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_VLV */
	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}

static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
						int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_VLV) &
						FORCEWAKE_KERNEL) == 0,
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_VLV) &
						FORCEWAKE_KERNEL),
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for Render to ack.\n");
	}

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_MEDIA_VLV) &
						FORCEWAKE_KERNEL) == 0,
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_MEDIA_VLV) &
						FORCEWAKE_KERNEL),
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for media to ack.\n");
	}

	/* WaRsForcewakeWaitTC0:vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
					int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
					_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}

static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (fw_engine & FORCEWAKE_RENDER &&
	    dev_priv->uncore.fw_rendercount++ != 0)
		fw_engine &= ~FORCEWAKE_RENDER;
	if (fw_engine & FORCEWAKE_MEDIA &&
	    dev_priv->uncore.fw_mediacount++ != 0)
		fw_engine &= ~FORCEWAKE_MEDIA;

	if (fw_engine)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (fw_engine & FORCEWAKE_RENDER) {
		WARN_ON(!dev_priv->uncore.fw_rendercount);
		if (--dev_priv->uncore.fw_rendercount != 0)
			fw_engine &= ~FORCEWAKE_RENDER;
	}

	if (fw_engine & FORCEWAKE_MEDIA) {
		WARN_ON(!dev_priv->uncore.fw_mediacount);
		if (--dev_priv->uncore.fw_mediacount != 0)
			fw_engine &= ~FORCEWAKE_MEDIA;
	}

	if (fw_engine)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

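/*
 * Timer callback that drops the forcewake reference one jiffy after the
 * last put, coalescing rapid get/put sequences into a single hardware
 * wake/sleep cycle.
 */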
static void gen6_force_wake_timer(unsigned long arg)
{
	struct drm_i915_private *dev_priv = (void *)arg;
	unsigned long irqflags;

	assert_device_not_suspended(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	WARN_ON(!dev_priv->uncore.forcewake_count);

	if (--dev_priv->uncore.forcewake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	intel_runtime_pm_put(dev_priv);
}

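/*
 * Bring the forcewake state machine back to a known state: cancel the
 * release timer, reset the hardware registers under uncore.lock, and
 * optionally re-acquire whatever forcewake references were held before.
 */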
static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	del_timer_sync(&dev_priv->uncore.force_wake_timer);

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (IS_VALLEYVIEW(dev))
		vlv_force_wake_reset(dev_priv);
	else if (IS_GEN6(dev) || IS_GEN7(dev))
		__gen6_gt_force_wake_reset(dev_priv);

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_GEN8(dev))
		__gen7_gt_force_wake_mt_reset(dev_priv);

	if (restore) { /* If reset with a user forcewake, try to restore */
		unsigned fw = 0;

		if (IS_VALLEYVIEW(dev)) {
			if (dev_priv->uncore.fw_rendercount)
				fw |= FORCEWAKE_RENDER;

			if (dev_priv->uncore.fw_mediacount)
				fw |= FORCEWAKE_MEDIA;
		} else {
			if (dev_priv->uncore.forcewake_count)
				fw = FORCEWAKE_ALL;
		}

		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				__raw_i915_read32(dev_priv, GTFIFOCTL) &
				GT_FIFO_FREE_ENTRIES_MASK;
	} else {
		dev_priv->uncore.forcewake_count = 0;
		dev_priv->uncore.fw_rendercount = 0;
		dev_priv->uncore.fw_mediacount = 0;
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

void intel_uncore_early_sanitize(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	intel_uncore_forcewake_reset(dev, false);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}

/*
 * Generally this is called implicitly by the register read function. However,
 * if some sequence requires the GT to not power down then this function should
 * be called at the beginning of the sequence followed by a call to
 * gen6_gt_force_wake_put() at the end of the sequence.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	intel_runtime_pm_get(dev_priv);

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev))
		return vlv_force_wake_get(dev_priv, fw_engine);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (dev_priv->uncore.forcewake_count++ == 0)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/*
 * see gen6_gt_force_wake_get()
 */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;
	bool delayed = false;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev)) {
		vlv_force_wake_put(dev_priv, fw_engine);
		goto out;
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	WARN_ON(!dev_priv->uncore.forcewake_count);

	if (--dev_priv->uncore.forcewake_count == 0) {
		dev_priv->uncore.forcewake_count++;
		delayed = true;
		mod_timer_pinned(&dev_priv->uncore.force_wake_timer,
				 jiffies + 1);
	}
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

out:
	if (!delayed)
		intel_runtime_pm_put(dev_priv);
}

void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->uncore.forcewake_count > 0);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	 ((reg) < 0x40000 && (reg) != FORCEWAKE)

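/*
 * On Valleyview the render and media wells power down independently, so
 * the ranges below map a register offset to the well that must be awake
 * before the register can be accessed.
 */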
#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
	(((reg) >= 0x2000 && (reg) < 0x4000) ||\
	((reg) >= 0x5000 && (reg) < 0x8000) ||\
	((reg) >= 0xB000 && (reg) < 0x12000) ||\
	((reg) >= 0x2E000 && (reg) < 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)\
	(((reg) >= 0x12000 && (reg) < 0x14000) ||\
	((reg) >= 0x22000 && (reg) < 0x24000) ||\
	((reg) >= 0x30000 && (reg) < 0x40000))

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

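/*
 * FPGA_DBG latches an indication whenever an "unclaimed" (unknown or
 * powered-down) register is accessed. Clear any stale indication before a
 * write so the check afterwards only reports our own access.
 */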
static void
hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
{
	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_ERROR("Unknown unclaimed register before writing to %x\n",
			  reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

static void
hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
{
	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_ERROR("Unclaimed write to %x\n", reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

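/*
 * Common prologue/epilogue shared by all the per-generation read routines:
 * take uncore.lock, assert the device is not runtime suspended, and emit a
 * tracepoint on the way out.
 */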
#define REG_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen4_read(x) \
static u##x \
gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	if (dev_priv->uncore.forcewake_count == 0 && \
	    NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, \
						      FORCEWAKE_ALL); \
		val = __raw_i915_read##x(dev_priv, reg); \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, \
						      FORCEWAKE_ALL); \
	} else { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} \
	REG_READ_FOOTER; \
}

#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	unsigned fwengine = 0; \
	REG_READ_HEADER(x); \
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_rendercount == 0) \
			fwengine = FORCEWAKE_RENDER; \
	} else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_mediacount == 0) \
			fwengine = FORCEWAKE_MEDIA; \
	}  \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	REG_READ_FOOTER; \
}

__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)
__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen4_read(8)
__gen4_read(16)
__gen4_read(32)
__gen4_read(64)

#undef __vlv_read
#undef __gen6_read
#undef __gen5_read
#undef __gen4_read
#undef REG_READ_FOOTER
#undef REG_READ_HEADER

#define REG_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen4_write(x) \
static void \
gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	REG_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	REG_WRITE_FOOTER; \
}

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	REG_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	hsw_unclaimed_reg_clear(dev_priv, reg); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_check(dev_priv, reg); \
	REG_WRITE_FOOTER; \
}

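/*
 * Writes to these registers are shadowed by the hardware, so they can be
 * posted without waking the GT via forcewake first.
 */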
static const u32 gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (reg == gen8_shadowed_regs[i])
			return true;

	return false;
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_get(dev_priv,	\
							      FORCEWAKE_ALL); \
		__raw_i915_write##x(dev_priv, reg, val); \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_put(dev_priv, \
							      FORCEWAKE_ALL); \
	} else { \
		__raw_i915_write##x(dev_priv, reg, val); \
	} \
	REG_WRITE_FOOTER; \
}

__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen4_write(8)
__gen4_write(16)
__gen4_write(32)
__gen4_write(64)

#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __gen5_write
#undef __gen4_write
#undef REG_WRITE_FOOTER
#undef REG_WRITE_HEADER

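/*
 * Pick the forcewake vfuncs and MMIO accessor implementations that match
 * the hardware we are running on.
 */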
void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	setup_timer(&dev_priv->uncore.force_wake_timer,
		    gen6_force_wake_timer, (unsigned long)dev_priv);

	intel_uncore_early_sanitize(dev);

	if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
	} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
		dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put;
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		mutex_lock(&dev->struct_mutex);
		__gen7_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		__gen7_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (ecobus & FORCEWAKE_MT_ENABLE) {
			dev_priv->uncore.funcs.force_wake_get =
				__gen7_gt_force_wake_mt_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen7_gt_force_wake_mt_put;
		} else {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_put;
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			__gen6_gt_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put =
			__gen6_gt_force_wake_put;
	}

	switch (INTEL_INFO(dev)->gen) {
	default:
		dev_priv->uncore.funcs.mmio_writeb  = gen8_write8;
		dev_priv->uncore.funcs.mmio_writew  = gen8_write16;
		dev_priv->uncore.funcs.mmio_writel  = gen8_write32;
		dev_priv->uncore.funcs.mmio_writeq  = gen8_write64;
		dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
		dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
		dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
		dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			dev_priv->uncore.funcs.mmio_writeb  = hsw_write8;
			dev_priv->uncore.funcs.mmio_writew  = hsw_write16;
			dev_priv->uncore.funcs.mmio_writel  = hsw_write32;
			dev_priv->uncore.funcs.mmio_writeq  = hsw_write64;
		} else {
			dev_priv->uncore.funcs.mmio_writeb  = gen6_write8;
			dev_priv->uncore.funcs.mmio_writew  = gen6_write16;
			dev_priv->uncore.funcs.mmio_writel  = gen6_write32;
			dev_priv->uncore.funcs.mmio_writeq  = gen6_write64;
		}

		if (IS_VALLEYVIEW(dev)) {
			dev_priv->uncore.funcs.mmio_readb  = vlv_read8;
			dev_priv->uncore.funcs.mmio_readw  = vlv_read16;
			dev_priv->uncore.funcs.mmio_readl  = vlv_read32;
			dev_priv->uncore.funcs.mmio_readq  = vlv_read64;
		} else {
			dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
			dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
			dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
			dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
		}
		break;
	case 5:
		dev_priv->uncore.funcs.mmio_writeb  = gen5_write8;
		dev_priv->uncore.funcs.mmio_writew  = gen5_write16;
		dev_priv->uncore.funcs.mmio_writel  = gen5_write32;
		dev_priv->uncore.funcs.mmio_writeq  = gen5_write64;
		dev_priv->uncore.funcs.mmio_readb  = gen5_read8;
		dev_priv->uncore.funcs.mmio_readw  = gen5_read16;
		dev_priv->uncore.funcs.mmio_readl  = gen5_read32;
		dev_priv->uncore.funcs.mmio_readq  = gen5_read64;
		break;
	case 4:
	case 3:
	case 2:
		dev_priv->uncore.funcs.mmio_writeb  = gen4_write8;
		dev_priv->uncore.funcs.mmio_writew  = gen4_write16;
		dev_priv->uncore.funcs.mmio_writel  = gen4_write32;
		dev_priv->uncore.funcs.mmio_writeq  = gen4_write64;
		dev_priv->uncore.funcs.mmio_readb  = gen4_read8;
		dev_priv->uncore.funcs.mmio_readw  = gen4_read16;
		dev_priv->uncore.funcs.mmio_readl  = gen4_read32;
		dev_priv->uncore.funcs.mmio_readq  = gen4_read64;
		break;
	}
}

void intel_uncore_fini(struct drm_device *dev)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
	intel_uncore_forcewake_reset(dev, false);
}

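/*
 * Registers userspace is allowed to read via the REG_READ ioctl, together
 * with the generations on which each offset is valid.
 */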
#define GEN_RANGE(l, h) GENMASK(h, l)

static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 8) },
};

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (entry->offset == reg->offset &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);

	switch (entry->size) {
	case 8:
		reg->val = I915_READ64(reg->offset);
		break;
	case 4:
		reg->val = I915_READ(reg->offset);
		break;
	case 2:
		reg->val = I915_READ16(reg->offset);
		break;
	case 1:
		reg->val = I915_READ8(reg->offset);
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

int i915_get_reset_stats_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct intel_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_ID && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i965_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int i965_do_reset(struct drm_device *dev)
{
	int ret;

	/* FIXME: i965g/gm need a display save/restore for gpu reset. */
	return -ENODEV;

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0).  Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);

	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	pci_write_config_byte(dev->pdev, I965_GDRST, 0);

	return 0;
}

static int g4x_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I965_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0);

	return 0;
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int	ret;

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev, true);

	return ret;
}

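/* Dispatch a full GPU reset to the appropriate per-generation routine. */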
int intel_gpu_reset(struct drm_device *dev)
{
	switch (INTEL_INFO(dev)->gen) {
	case 8:
	case 7:
	case 6: return gen6_do_reset(dev);
	case 5: return ironlake_do_reset(dev);
	case 4:
		if (IS_G4X(dev))
			return g4x_do_reset(dev);
		else
			return i965_do_reset(dev);
	default: return -ENODEV;
	}
}

void intel_uncore_check_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}