intel_uncore.c
/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 2

#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
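
/*
 * Note: these raw accessors poke the ioremapped register BAR (dev_priv->regs)
 * directly and bypass the forcewake handling, the runtime-PM assertion and
 * the register tracing that the per-generation mmio read/write vfuncs further
 * down add on top of them.
 */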

static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
	WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
	     "Device suspended\n");
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	u32 gt_thread_status_mask;

	if (IS_HASWELL(dev_priv->dev))
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
	else
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;

	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
}

static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
							int fw_engine)
{
	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE, 1);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:snb */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void __gen7_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
}

static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
							int fw_engine)
{
	u32 forcewake_ack;

	if (IS_HASWELL(dev_priv->dev) || IS_BROADWELL(dev_priv->dev))
		forcewake_ack = FORCEWAKE_ACK_HSW;
	else
		forcewake_ack = FORCEWAKE_MT_ACK;

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:ivb,hsw */
	if (INTEL_INFO(dev_priv->dev)->gen < 8)
		__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
							int fw_engine)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}

static void __gen7_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
							int fw_engine)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);

	if (IS_GEN7(dev_priv->dev))
		gen6_gt_check_fifodbg(dev_priv);
}

static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, FIFO will be shared by both SW and HW.
	 * So, we need to read the FREE_ENTRIES every time */
	if (IS_VALLEYVIEW(dev_priv->dev))
		dev_priv->uncore.fifo_count =
			__raw_i915_read32(dev_priv, GTFIFOCTL) &
						GT_FIFO_FREE_ENTRIES_MASK;

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}

static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_VLV */
	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}

static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
						int fw_engine)
{
	/*
	 * WaRsDontPollForAckOnClearingFWBits:vlv
	 * Hardware clears ack bits lazily (only when all ack
	 * bits become 0) so don't poll for individual ack
	 * bits to be clear here like on other platforms.
	 */

	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine) {

		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_VLV) &
						FORCEWAKE_KERNEL),
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for Render to ack.\n");
	}

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine) {

		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_MEDIA_VLV) &
						FORCEWAKE_KERNEL),
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for media to ack.\n");
	}

	/* WaRsForcewakeWaitTC0:vlv */
	if (!IS_CHERRYVIEW(dev_priv->dev))
		__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
					int fw_engine)
{

	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
					_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));


	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* something from same cacheline, but !FORCEWAKE_VLV */
	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
	if (!IS_CHERRYVIEW(dev_priv->dev))
		gen6_gt_check_fifodbg(dev_priv);
}

static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (fw_engine & FORCEWAKE_RENDER &&
	    dev_priv->uncore.fw_rendercount++ != 0)
		fw_engine &= ~FORCEWAKE_RENDER;
	if (fw_engine & FORCEWAKE_MEDIA &&
	    dev_priv->uncore.fw_mediacount++ != 0)
		fw_engine &= ~FORCEWAKE_MEDIA;

	if (fw_engine)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (fw_engine & FORCEWAKE_RENDER) {
		WARN_ON(!dev_priv->uncore.fw_rendercount);
		if (--dev_priv->uncore.fw_rendercount != 0)
			fw_engine &= ~FORCEWAKE_RENDER;
	}

	if (fw_engine & FORCEWAKE_MEDIA) {
		WARN_ON(!dev_priv->uncore.fw_mediacount);
		if (--dev_priv->uncore.fw_mediacount != 0)
			fw_engine &= ~FORCEWAKE_MEDIA;
	}

	if (fw_engine)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void __gen9_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
			_MASKED_BIT_DISABLE(0xffff));

	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
			_MASKED_BIT_DISABLE(0xffff));

	__raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
			_MASKED_BIT_DISABLE(0xffff));
}

static void
__gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_RENDER_GEN9) &
						FORCEWAKE_KERNEL) == 0,
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_RENDER_GEN9) &
						FORCEWAKE_KERNEL),
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for Render to ack.\n");
	}

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_MEDIA_GEN9) &
						FORCEWAKE_KERNEL) == 0,
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_MEDIA_GEN9) &
						FORCEWAKE_KERNEL),
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for Media to ack.\n");
	}

	/* Check for Blitter Engine */
	if (FORCEWAKE_BLITTER & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_BLITTER_GEN9) &
						FORCEWAKE_KERNEL) == 0,
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Blitter forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_BLITTER_GEN9) &
						FORCEWAKE_KERNEL),
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for Blitter to ack.\n");
	}
}

static void
__gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
				_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
				_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* Check for Blitter Engine */
	if (FORCEWAKE_BLITTER & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
				_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
}

static void
gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (FORCEWAKE_RENDER & fw_engine) {
		if (dev_priv->uncore.fw_rendercount++ == 0)
			dev_priv->uncore.funcs.force_wake_get(dev_priv,
							FORCEWAKE_RENDER);
	}

	if (FORCEWAKE_MEDIA & fw_engine) {
		if (dev_priv->uncore.fw_mediacount++ == 0)
			dev_priv->uncore.funcs.force_wake_get(dev_priv,
							FORCEWAKE_MEDIA);
	}

	if (FORCEWAKE_BLITTER & fw_engine) {
		if (dev_priv->uncore.fw_blittercount++ == 0)
			dev_priv->uncore.funcs.force_wake_get(dev_priv,
							FORCEWAKE_BLITTER);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void
gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (FORCEWAKE_RENDER & fw_engine) {
		WARN_ON(dev_priv->uncore.fw_rendercount == 0);
		if (--dev_priv->uncore.fw_rendercount == 0)
			dev_priv->uncore.funcs.force_wake_put(dev_priv,
							FORCEWAKE_RENDER);
	}

	if (FORCEWAKE_MEDIA & fw_engine) {
		WARN_ON(dev_priv->uncore.fw_mediacount == 0);
		if (--dev_priv->uncore.fw_mediacount == 0)
			dev_priv->uncore.funcs.force_wake_put(dev_priv,
							FORCEWAKE_MEDIA);
	}

	if (FORCEWAKE_BLITTER & fw_engine) {
		WARN_ON(dev_priv->uncore.fw_blittercount == 0);
		if (--dev_priv->uncore.fw_blittercount == 0)
			dev_priv->uncore.funcs.force_wake_put(dev_priv,
							FORCEWAKE_BLITTER);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void gen6_force_wake_timer(unsigned long arg)
{
	struct drm_i915_private *dev_priv = (void *)arg;
	unsigned long irqflags;

	assert_device_not_suspended(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	WARN_ON(!dev_priv->uncore.forcewake_count);

	if (--dev_priv->uncore.forcewake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	intel_runtime_pm_put(dev_priv);
}

void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (del_timer_sync(&dev_priv->uncore.force_wake_timer))
		gen6_force_wake_timer((unsigned long)dev_priv);

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (IS_VALLEYVIEW(dev))
		vlv_force_wake_reset(dev_priv);
	else if (IS_GEN6(dev) || IS_GEN7(dev))
		__gen6_gt_force_wake_reset(dev_priv);

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
		__gen7_gt_force_wake_mt_reset(dev_priv);

	if (IS_GEN9(dev))
		__gen9_gt_force_wake_mt_reset(dev_priv);

	if (restore) { /* If reset with a user forcewake, try to restore */
		unsigned fw = 0;

		if (IS_VALLEYVIEW(dev)) {
			if (dev_priv->uncore.fw_rendercount)
				fw |= FORCEWAKE_RENDER;

			if (dev_priv->uncore.fw_mediacount)
				fw |= FORCEWAKE_MEDIA;
		} else if (IS_GEN9(dev)) {
			if (dev_priv->uncore.fw_rendercount)
				fw |= FORCEWAKE_RENDER;

			if (dev_priv->uncore.fw_mediacount)
				fw |= FORCEWAKE_MEDIA;

			if (dev_priv->uncore.fw_blittercount)
				fw |= FORCEWAKE_BLITTER;
		} else {
			if (dev_priv->uncore.forcewake_count)
				fw = FORCEWAKE_ALL;
		}

		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				__raw_i915_read32(dev_priv, GTFIFOCTL) &
				GT_FIFO_FREE_ENTRIES_MASK;
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void __intel_uncore_early_sanitize(struct drm_device *dev,
					  bool restore_forcewake)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	intel_uncore_forcewake_reset(dev, restore_forcewake);
}

void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
	__intel_uncore_early_sanitize(dev, restore_forcewake);
	i915_check_and_clear_faults(dev);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}

/*
 * Generally this is called implicitly by the register read function. However,
 * if some sequence requires the GT to not power down then this function should
 * be called at the beginning of the sequence followed by a call to
 * gen6_gt_force_wake_put() at the end of the sequence.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	intel_runtime_pm_get(dev_priv);

	/* Redirect to Gen9 specific routine */
	if (IS_GEN9(dev_priv->dev))
		return gen9_force_wake_get(dev_priv, fw_engine);

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev))
		return vlv_force_wake_get(dev_priv, fw_engine);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (dev_priv->uncore.forcewake_count++ == 0)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/*
 * see gen6_gt_force_wake_get()
 */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;
	bool delayed = false;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	/* Redirect to Gen9 specific routine */
	if (IS_GEN9(dev_priv->dev)) {
		gen9_force_wake_put(dev_priv, fw_engine);
		goto out;
	}

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev)) {
		vlv_force_wake_put(dev_priv, fw_engine);
		goto out;
	}


	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	WARN_ON(!dev_priv->uncore.forcewake_count);

	if (--dev_priv->uncore.forcewake_count == 0) {
		dev_priv->uncore.forcewake_count++;
		delayed = true;
		mod_timer_pinned(&dev_priv->uncore.force_wake_timer,
				 jiffies + 1);
	}
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

out:
	if (!delayed)
		intel_runtime_pm_put(dev_priv);
}
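
/*
 * A minimal usage sketch for the get/put pair above, assuming a valid
 * dev_priv pointer; REG_A and REG_B are placeholders, not real registers:
 *
 *	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 *	a = I915_READ(REG_A);	(the GT cannot power down between these reads)
 *	b = I915_READ(REG_B);
 *	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 */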

void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->uncore.forcewake_count > 0);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	 ((reg) < 0x40000 && (reg) != FORCEWAKE)

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0xB000, 0x12000) || \
	 REG_RANGE((reg), 0x2E000, 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x22000, 0x24000) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0xB000, 0xC000) || \
	 REG_RANGE((reg), 0xE000, 0xE800))

#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8800, 0x8900) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x4000, 0x5000) || \
	 REG_RANGE((reg), 0x8000, 0x8300) || \
	 REG_RANGE((reg), 0x8500, 0x8600) || \
	 REG_RANGE((reg), 0x9000, 0xB000) || \
	 REG_RANGE((reg), 0xC000, 0xC800) || \
	 REG_RANGE((reg), 0xF000, 0x10000) || \
	 REG_RANGE((reg), 0x14000, 0x14400) || \
	 REG_RANGE((reg), 0x22000, 0x24000))

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
			bool before)
{
	const char *op = read ? "reading" : "writing to";
	const char *when = before ? "before" : "after";

	if (!i915.mmio_debug)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
		     when, op, reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

static void
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
	if (i915.mmio_debug)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_ERROR("Unclaimed register detected. Please use the i915.mmio_debug=1 to debug this problem.");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

#define REG_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen4_read(x) \
static u##x \
gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
	if (dev_priv->uncore.forcewake_count == 0 && \
	    NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, \
						      FORCEWAKE_ALL); \
		val = __raw_i915_read##x(dev_priv, reg); \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, \
						      FORCEWAKE_ALL); \
	} else { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
	REG_READ_FOOTER; \
}

#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	unsigned fwengine = 0; \
	REG_READ_HEADER(x); \
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_rendercount == 0) \
			fwengine = FORCEWAKE_RENDER; \
	} else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_mediacount == 0) \
			fwengine = FORCEWAKE_MEDIA; \
	}  \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	REG_READ_FOOTER; \
}

#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	unsigned fwengine = 0; \
	REG_READ_HEADER(x); \
	if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_rendercount == 0) \
			fwengine = FORCEWAKE_RENDER; \
	} else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_mediacount == 0) \
			fwengine = FORCEWAKE_MEDIA; \
	} else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_rendercount == 0) \
			fwengine |= FORCEWAKE_RENDER; \
		if (dev_priv->uncore.fw_mediacount == 0) \
			fwengine |= FORCEWAKE_MEDIA; \
	} \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	REG_READ_FOOTER; \
}

__chv_read(8)
__chv_read(16)
__chv_read(32)
__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)
__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen4_read(8)
__gen4_read(16)
__gen4_read(32)
__gen4_read(64)

#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef __gen5_read
#undef __gen4_read
#undef REG_READ_FOOTER
#undef REG_READ_HEADER

#define REG_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen4_write(x) \
static void \
gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	REG_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	REG_WRITE_FOOTER; \
}

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	REG_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	REG_WRITE_FOOTER; \
}

static const u32 gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (reg == gen8_shadowed_regs[i])
			return true;

	return false;
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_get(dev_priv,	\
							      FORCEWAKE_ALL); \
		__raw_i915_write##x(dev_priv, reg, val); \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_put(dev_priv, \
							      FORCEWAKE_ALL); \
	} else { \
		__raw_i915_write##x(dev_priv, reg, val); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	REG_WRITE_FOOTER; \
}

#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	unsigned fwengine = 0; \
	bool shadowed = is_gen8_shadowed(dev_priv, reg); \
	REG_WRITE_HEADER; \
	if (!shadowed) { \
		if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine = FORCEWAKE_RENDER; \
		} else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine = FORCEWAKE_MEDIA; \
		} else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine |= FORCEWAKE_RENDER; \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine |= FORCEWAKE_MEDIA; \
		} \
	} \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	REG_WRITE_FOOTER; \
}

__chv_write(8)
__chv_write(16)
__chv_write(32)
__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen4_write(8)
__gen4_write(16)
__gen4_write(32)
__gen4_write(64)

#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __gen5_write
#undef __gen4_write
#undef REG_WRITE_FOOTER
#undef REG_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
	dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)
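
/*
 * For example, ASSIGN_READ_MMIO_VFUNCS(gen6) in intel_uncore_init() below
 * expands to assignments of gen6_read8/16/32/64 to the corresponding
 * mmio_readb/w/l/q function pointers; the write macro works the same way.
 */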

void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	setup_timer(&dev_priv->uncore.force_wake_timer,
		    gen6_force_wake_timer, (unsigned long)dev_priv);

	__intel_uncore_early_sanitize(dev, false);

	if (IS_GEN9(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen9_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = __gen9_force_wake_put;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
		dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put;
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		mutex_lock(&dev->struct_mutex);
		__gen7_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		__gen7_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (ecobus & FORCEWAKE_MT_ENABLE) {
			dev_priv->uncore.funcs.force_wake_get =
				__gen7_gt_force_wake_mt_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen7_gt_force_wake_mt_put;
		} else {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_put;
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			__gen6_gt_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put =
			__gen6_gt_force_wake_put;
	}

	switch (INTEL_INFO(dev)->gen) {
	default:
		if (IS_CHERRYVIEW(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(chv);
			ASSIGN_READ_MMIO_VFUNCS(chv);

		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(hsw);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen6);
		}

		if (IS_VALLEYVIEW(dev)) {
			ASSIGN_READ_MMIO_VFUNCS(vlv);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		ASSIGN_WRITE_MMIO_VFUNCS(gen4);
		ASSIGN_READ_MMIO_VFUNCS(gen4);
		break;
	}

	i915_check_and_clear_faults(dev);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS

void intel_uncore_fini(struct drm_device *dev)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
	intel_uncore_forcewake_reset(dev, false);
}

#define GEN_RANGE(l, h) GENMASK(h, l)

static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
};
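
/*
 * As a concrete example of the gen_bitmask encoding: GEN_RANGE(4, 9) is
 * GENMASK(9, 4), i.e. bits 4..9 set (0x3f0), so the RING_TIMESTAMP entry
 * above is accepted for gen 4 through gen 9 by the
 * "1 << INTEL_INFO(dev)->gen & entry->gen_bitmask" test below.
 */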

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (entry->offset == reg->offset &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);

	switch (entry->size) {
	case 8:
		reg->val = I915_READ64(reg->offset);
		break;
	case 4:
		reg->val = I915_READ(reg->offset);
		break;
	case 2:
		reg->val = I915_READ16(reg->offset);
		break;
	case 1:
		reg->val = I915_READ8(reg->offset);
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
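
/*
 * Rough sketch of how userspace might exercise this ioctl, assuming the
 * drm_i915_reg_read layout from the uapi header and libdrm's drmIoctl()
 * wrapper (error handling omitted; 0x2358 is the whitelisted render ring
 * RING_TIMESTAMP offset):
 *
 *	struct drm_i915_reg_read rd = { .offset = 0x2358 };
 *	drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &rd);
 *	printf("timestamp %llu\n", (unsigned long long)rd.val);
 */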

int i915_get_reset_stats_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct intel_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i965_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int i965_do_reset(struct drm_device *dev)
{
	int ret;

	/* FIXME: i965g/gm need a display save/restore for gpu reset. */
	return -ENODEV;

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0).  Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret =  wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);

	ret =  wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	pci_write_config_byte(dev->pdev, I965_GDRST, 0);

	return 0;
}

static int g4x_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret =  wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret =  wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I965_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0);

	return 0;
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int	ret;

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev, true);

	return ret;
}

int intel_gpu_reset(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 6)
		return gen6_do_reset(dev);
	else if (IS_GEN5(dev))
		return ironlake_do_reset(dev);
	else if (IS_G4X(dev))
		return g4x_do_reset(dev);
	else if (IS_GEN4(dev))
		return i965_do_reset(dev);
	else
		return -ENODEV;
}

void intel_uncore_check_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}