/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <linux/pm_runtime.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 50

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))

static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
		  "Device suspended\n");
}

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	WARN_ON(d->reg_set == 0);
	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	mod_timer_pinned(&d->timer, jiffies + 1);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_set);
}

static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}

static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
	/* something from same cacheline, but not from the set register */
	if (d->reg_post)
		__raw_posting_read(d->i915, d->reg_post);
}

static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
		fw_domain_wait_ack(d);
	}
}

static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
		fw_domain_put(d);
		fw_domain_posting_read(d);
	}
}

static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	/* No need to do this for all domains, just the first one found */
	for_each_fw_domain(d, dev_priv, id) {
		fw_domain_posting_read(d);
		break;
	}
}

static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	if (dev_priv->uncore.fw_domains == 0)
		return;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
		fw_domain_reset(d);

	fw_domains_posting_read(dev_priv);
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	fw_domains_put(dev_priv, fw_domains);
	gen6_gt_check_fifodbg(dev_priv);
}

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, FIFO will be shared by both SW and HW.
	 * So, we need to read the free entries every time */
	if (IS_VALLEYVIEW(dev_priv->dev))
		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = fifo_free_entries(dev_priv);

		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = fifo_free_entries(dev_priv);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}
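
/*
 * Note: each write issued while the GT may be powered down consumes one
 * GT FIFO entry (hence the fifo_count decrement above and the checks in
 * the gen6/hsw write paths below), so this helper spins in 10us steps,
 * up to 500 iterations, until the free count rises above
 * GT_FIFO_NUM_RESERVED_ENTRIES.
 */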

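/*
 * Forcewake releases are deferred: rather than dropping a domain
 * immediately, __intel_uncore_forcewake_put() and __force_wake_get() arm
 * a ~1 jiffy timer (fw_domain_arm_timer()), so bursts of register
 * accesses reuse an already-awake domain instead of repeating the
 * wake/ack handshake. This callback performs the deferred release.
 */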
static void intel_uncore_fw_release_timer(unsigned long arg)
{
	struct intel_uncore_forcewake_domain *domain = (void *)arg;
	unsigned long irqflags;

	assert_device_not_suspended(domain->i915);

	spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		domain->i915->uncore.funcs.force_wake_put(domain->i915,
							  1 << domain->id);

	spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
}

void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domain_id id;
	enum forcewake_domains fw = 0, active_domains;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		active_domains = 0;

		for_each_fw_domain(domain, dev_priv, id) {
			if (del_timer_sync(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer((unsigned long)domain);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		for_each_fw_domain(domain, dev_priv, id) {
			if (timer_pending(&domain->timer))
				active_domains |= (1 << id);
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	for_each_fw_domain(domain, dev_priv, id)
		if (domain->wake_count)
			fw |= 1 << id;

	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, FORCEWAKE_ALL);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void intel_uncore_ellc_detect(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
	     INTEL_INFO(dev)->gen >= 9) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}
}

static void __intel_uncore_early_sanitize(struct drm_device *dev,
					  bool restore_forcewake)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	intel_uncore_forcewake_reset(dev, restore_forcewake);
}

void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
	__intel_uncore_early_sanitize(dev, restore_forcewake);
	i915_check_and_clear_faults(dev);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}

static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count++)
			fw_domains &= ~(1 << id);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However if some sequence requires the GT to not power down a particular
 * forcewake domain, this function should be called at the beginning of the
 * sequence. The reference should subsequently be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->pm.suspended);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
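
/*
 * Illustrative usage sketch (not from the original source): keep the
 * render well awake across a multi-register sequence instead of paying
 * the wake/ack handshake on every access:
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_RENDER);
 *	... a run of I915_READ()/I915_WRITE() accesses ...
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_RENDER);
 */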

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(dev_priv, fw_domains);
}

static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count)
			continue;

		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}
}
/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for the specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(dev_priv, fw_domains);
}
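
/*
 * Illustrative sketch (not from the original source) of the __locked
 * variants, where the caller already owns dev_priv->uncore.lock:
 *
 *	spin_lock_irq(&dev_priv->uncore.lock);
 *	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
 *	... register accesses under the lock ...
 *	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
 *	spin_unlock_irq(&dev_priv->uncore.lock);
 */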

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	for_each_fw_domain(domain, dev_priv, id)
		WARN_ON(domain->wake_count);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0xB000, 0x12000) || \
	 REG_RANGE((reg), 0x2E000, 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x22000, 0x24000) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE800))

#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8800, 0x8900) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x38000))

#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x4000, 0x5000) || \
	 REG_RANGE((reg), 0x8000, 0x8300) || \
	 REG_RANGE((reg), 0x8500, 0x8600) || \
	 REG_RANGE((reg), 0x9000, 0xB000) || \
	 REG_RANGE((reg), 0xF000, 0x10000))

#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0xB00,  0x2000)

#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x2700) || \
	 REG_RANGE((reg), 0x3000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8140, 0x8160) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0x8C00, 0x8D00) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE900) || \
	 REG_RANGE((reg), 0x24400, 0x24800))

#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8130, 0x8140) || \
	 REG_RANGE((reg), 0x8800, 0x8A00) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0x9400, 0x9800)

#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
	((reg) < 0x40000 && \
	 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
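
/*
 * Worked example: on CHV, offset 0x9400 falls in the 0x9000-0xB000 common
 * range above, so an access there takes both the render and media wells,
 * while 0x12345 falls in the 0x12000-0x14000 media range and wakes the
 * media well only.
 */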

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
			bool before)
{
	const char *op = read ? "reading" : "writing to";
	const char *when = before ? "before" : "after";

	if (!i915.mmio_debug)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
		     when, op, reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
		i915.mmio_debug--; /* Only report the first N failures */
	}
}

static void
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
	static bool mmio_debug_once = true;

	if (i915.mmio_debug || !mmio_debug_once)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_DEBUG("Unclaimed register detected, "
			  "enabling oneshot unclaimed register reporting. "
			  "Please use i915.mmio_debug=N for more information.\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
		i915.mmio_debug = mmio_debug_once--;
	}
}

#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	u32 offset = reg; \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define GEN6_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

static inline void __force_wake_get(struct drm_i915_private *dev_priv,
				    enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (WARN_ON(!fw_domains))
		return;

	/* Ideally GCC would constant-fold and eliminate this loop */
	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count) {
			fw_domains &= ~(1 << id);
			continue;
		}

		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
	if (NEEDS_FORCE_WAKE(offset)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	val = __raw_i915_read##x(dev_priv, reg); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
	GEN6_READ_FOOTER; \
}

#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	enum forcewake_domains fw_engine = 0; \
	GEN6_READ_HEADER(x); \
	if (!NEEDS_FORCE_WAKE(offset)) \
		fw_engine = 0; \
	else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_MEDIA; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	enum forcewake_domains fw_engine = 0; \
	GEN6_READ_HEADER(x); \
	if (!NEEDS_FORCE_WAKE(offset)) \
		fw_engine = 0; \
	else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define SKL_NEEDS_FORCE_WAKE(reg) \
	((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))

#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
	if (!SKL_NEEDS_FORCE_WAKE(offset)) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
	GEN6_READ_FOOTER; \
}

__gen9_read(8)
__gen9_read(16)
__gen9_read(32)
__gen9_read(64)
__chv_read(8)
__chv_read(16)
__chv_read(32)
__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER
#define VGPU_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define VGPU_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	VGPU_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	VGPU_READ_FOOTER; \
}

__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)

#undef __vgpu_read
#undef VGPU_READ_FOOTER
#undef VGPU_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)
__gen2_write(64)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	u32 offset = reg; \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define GEN6_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}
static const u32 gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (reg == gen8_shadowed_regs[i])
			return true;

	return false;
}
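
/*
 * Writes to shadowed registers do not need the forcewake dance;
 * gen8_write##x() below skips __force_wake_get() for them, so e.g. a
 * RING_TAIL() write is issued without waking a power well.
 */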

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(dev_priv, reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	__raw_i915_write##x(dev_priv, reg, val); \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}

#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine = 0; \
	GEN6_WRITE_HEADER; \
	if (!NEEDS_FORCE_WAKE(offset) || \
	    is_gen8_shadowed(dev_priv, reg)) \
		fw_engine = 0; \
	else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

static const u32 gen9_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	FORCEWAKE_BLITTER_GEN9,
	FORCEWAKE_RENDER_GEN9,
	FORCEWAKE_MEDIA_GEN9,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	/* TODO: Other registers are not yet used */
};

static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
		if (reg == gen9_shadowed_regs[i])
			return true;

	return false;
}

#define __gen9_write(x) \
static void \
gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
		bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	if (!SKL_NEEDS_FORCE_WAKE(offset) || \
	    is_gen9_shadowed(dev_priv, reg)) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}

__gen9_write(8)
__gen9_write(16)
__gen9_write(32)
__gen9_write(64)
__chv_write(8)
__chv_write(16)
__chv_write(32)
__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)

#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define VGPU_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define VGPU_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __vgpu_write(x) \
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
			  off_t reg, u##x val, bool trace) { \
	VGPU_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	VGPU_WRITE_FOOTER; \
}

__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)
__vgpu_write(64)

#undef __vgpu_write
#undef VGPU_WRITE_FOOTER
#undef VGPU_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
	dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)
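
/*
 * For example, ASSIGN_READ_MMIO_VFUNCS(gen6) expands to
 *
 *	dev_priv->uncore.funcs.mmio_readb = gen6_read8;
 *	dev_priv->uncore.funcs.mmio_readw = gen6_read16;
 *	dev_priv->uncore.funcs.mmio_readl = gen6_read32;
 *	dev_priv->uncore.funcs.mmio_readq = gen6_read64;
 *
 * wiring in the per-width accessors generated by the __gen6_read() macros
 * above.
 */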

static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   u32 reg_set, u32 reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	WARN_ON(d->wake_count);

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	if (IS_GEN6(dev_priv)) {
		d->val_reset = 0;
		d->val_set = FORCEWAKE_KERNEL;
		d->val_clear = 0;
	} else {
		/* WaRsClearFWBitsAtReset:bdw,skl */
		d->val_reset = _MASKED_BIT_DISABLE(0xffff);
		d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	if (IS_VALLEYVIEW(dev_priv))
		d->reg_post = FORCEWAKE_ACK_VLV;
	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
		d->reg_post = ECOBUS;
	else
		d->reg_post = 0;

	d->i915 = dev_priv;
	d->id = domain_id;

	setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);

	dev_priv->uncore.fw_domains |= (1 << domain_id);

	fw_domain_reset(d);
}

static void intel_uncore_fw_domains_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev_priv->dev)->gen <= 5)
		return;

	if (IS_GEN9(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		if (!IS_CHERRYVIEW(dev))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. In this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		mutex_lock(&dev->struct_mutex);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}

void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_check_vgpu(dev);

	intel_uncore_ellc_detect(dev);
	intel_uncore_fw_domains_init(dev);
	__intel_uncore_early_sanitize(dev, false);

	switch (INTEL_INFO(dev)->gen) {
	default:
	case 9:
		ASSIGN_WRITE_MMIO_VFUNCS(gen9);
		ASSIGN_READ_MMIO_VFUNCS(gen9);
		break;
	case 8:
		if (IS_CHERRYVIEW(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(chv);
			ASSIGN_READ_MMIO_VFUNCS(chv);

		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(hsw);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen6);
		}

		if (IS_VALLEYVIEW(dev)) {
			ASSIGN_READ_MMIO_VFUNCS(vlv);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
		ASSIGN_READ_MMIO_VFUNCS(gen2);
		break;
	}

	if (intel_vgpu_active(dev)) {
		ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
		ASSIGN_READ_MMIO_VFUNCS(vgpu);
	}

	i915_check_and_clear_faults(dev);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS

void intel_uncore_fini(struct drm_device *dev)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
	intel_uncore_forcewake_reset(dev, false);
}

#define GEN_RANGE(l, h) GENMASK(h, l)

static const struct register_whitelist {
	uint32_t offset_ldw, offset_udw;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	  .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	  .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
};
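
/*
 * Worked example: GEN_RANGE(4, 9) is GENMASK(9, 4) == 0x3f0, so the
 * "1 << gen & gen_bitmask" test in i915_reg_read_ioctl() below accepts
 * the entry above on gen4 through gen9 hardware.
 */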

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	unsigned size;
	uint32_t offset_ldw, offset_udw;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (entry->offset_ldw == (reg->offset & -entry->size) &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	/* We use the low bits to encode extra flags as the register should
	 * be naturally aligned (and those that are not so aligned merely
	 * limit the available flags for that register).
	 */
	offset_ldw = entry->offset_ldw;
	offset_udw = entry->offset_udw;
	size = entry->size;
	size |= reg->offset ^ offset_ldw;

	intel_runtime_pm_get(dev_priv);

	switch (size) {
	case 8 | 1:
		reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
		break;
	case 8:
		reg->val = I915_READ64(offset_ldw);
		break;
	case 4:
		reg->val = I915_READ(offset_ldw);
		break;
	case 2:
		reg->val = I915_READ16(offset_ldw);
		break;
	case 1:
		reg->val = I915_READ8(offset_ldw);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

int i915_get_reset_stats_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct intel_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_device *dev)
{
	/* assert reset for at least 20 usec */
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(20);
	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(dev), 500);
}

static int g4x_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_device *dev)
{
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(dev), 500);
}

static int g4x_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR, 0);

	return 0;
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int	ret;

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev, true);

	return ret;
}

static int wait_for_register(struct drm_i915_private *dev_priv,
			     const u32 reg,
			     const u32 mask,
			     const u32 value,
			     const unsigned long timeout_ms)
{
	return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
}

static int gen8_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	int i;

	for_each_ring(engine, dev_priv, i) {
		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
			   _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));

		if (wait_for_register(dev_priv,
				      RING_RESET_CTL(engine->mmio_base),
				      RESET_CTL_READY_TO_RESET,
				      RESET_CTL_READY_TO_RESET,
				      700)) {
			DRM_ERROR("%s: reset request timeout\n", engine->name);
			goto not_ready;
		}
	}

	return gen6_do_reset(dev);

not_ready:
	for_each_ring(engine, dev_priv, i)
		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
			   _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));

	return -EIO;
}

static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *)
{
	if (!i915.reset)
		return NULL;

	if (INTEL_INFO(dev)->gen >= 8)
		return gen8_do_reset;
	else if (INTEL_INFO(dev)->gen >= 6)
		return gen6_do_reset;
	else if (IS_GEN5(dev))
		return ironlake_do_reset;
	else if (IS_G4X(dev))
		return g4x_do_reset;
	else if (IS_G33(dev))
		return g33_do_reset;
	else if (INTEL_INFO(dev)->gen >= 3)
		return i915_do_reset;
	else
		return NULL;
}

int intel_gpu_reset(struct drm_device *dev)
{
	int (*reset)(struct drm_device *);

	reset = intel_get_gpu_reset(dev);
	if (reset == NULL)
		return -ENODEV;

	return reset(dev);
}

bool intel_has_gpu_reset(struct drm_device *dev)
{
	return intel_get_gpu_reset(dev) != NULL;
}

void intel_uncore_check_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}