/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <linux/pm_runtime.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 50

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))

static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	WARN_ON(!i915_mmio_reg_valid(d->reg_set));
	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}

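/*
 * Defer releasing a domain: take an extra reference and arm a ~1ms
 * hrtimer; intel_uncore_fw_release_timer() below drops it (and the
 * hardware forcewake, if it was the last reference) when it fires.
 */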
static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       ktime_set(0, NSEC_PER_MSEC),
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_set);
}

static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}

static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
	/* something from same cacheline, but not from the set register */
	if (i915_mmio_reg_valid(d->reg_post))
		__raw_posting_read(d->i915, d->reg_post);
}

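/*
 * Wake a set of domains: for each requested domain, wait for any stale
 * ACK to clear and write the wake request; only then wait for all the
 * ACKs, so that the power wells can wake up in parallel.
 */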
static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;

	for_each_fw_domain_masked(d, fw_domains, dev_priv) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, dev_priv)
		fw_domain_wait_ack(d);
}

static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;

	for_each_fw_domain_masked(d, fw_domains, dev_priv) {
		fw_domain_put(d);
		fw_domain_posting_read(d);
	}
}

static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *d;

	/* No need to do this for all domains, just for the first one found. */
	for_each_fw_domain(d, dev_priv) {
		fw_domain_posting_read(d);
		break;
	}
}

static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;

	if (dev_priv->uncore.fw_domains == 0)
		return;

	for_each_fw_domain_masked(d, fw_domains, dev_priv)
		fw_domain_reset(d);

	fw_domains_posting_read(dev_priv);
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	fw_domains_put(dev_priv, fw_domains);
	gen6_gt_check_fifodbg(dev_priv);
}

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

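/*
 * Claim a slot in the GT write FIFO, spinning while only the reserved
 * entries are free; returns nonzero on timeout so that callers know to
 * check GTFIFODBG afterwards.
 */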
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, the FIFO will be shared by both SW and HW.
	 * So, we need to read the FREE_ENTRIES every time. */
	if (IS_VALLEYVIEW(dev_priv))
		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = fifo_free_entries(dev_priv);

		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = fifo_free_entries(dev_priv);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}

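/*
 * hrtimer callback armed by fw_domain_arm_timer(): drops the deferred
 * reference and, once the wake count hits zero, releases the hardware
 * forcewake for the domain.
 */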
static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct drm_i915_private *dev_priv = domain->i915;
	unsigned long irqflags;

	assert_rpm_device_not_suspended(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0) {
		dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
		dev_priv->uncore.fw_domains_active &= ~domain->mask;
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return HRTIMER_NORESTART;
}

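/*
 * Cancel all pending release timers, drop any remaining references and
 * put every domain back into its reset state; with @restore, re-acquire
 * whatever was held before (used e.g. around GPU reset).
 */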
void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
				  bool restore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		active_domains = 0;

		for_each_fw_domain(domain, dev_priv) {
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		for_each_fw_domain(domain, dev_priv) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	fw = dev_priv->uncore.fw_domains_active;
	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, FORCEWAKE_ALL);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
{
	const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	const unsigned int sets[4] = { 1, 1, 2, 2 };
	const u32 cap = dev_priv->edram_cap;

	return EDRAM_NUM_BANKS(cap) *
		ways[EDRAM_WAYS_IDX(cap)] *
		sets[EDRAM_SETS_IDX(cap)] *
		1024 * 1024;
}

u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
{
	if (!HAS_EDRAM(dev_priv))
		return 0;

	/* The needed capability bits for size calculation
	 * are not there with pre-Gen9, so return 128MB always.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 128 * 1024 * 1024;

	return gen9_edram_size(dev_priv);
}

static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv) ||
	    IS_BROADWELL(dev_priv) ||
	    INTEL_GEN(dev_priv) >= 9) {
		dev_priv->edram_cap = __raw_i915_read32(dev_priv,
							HSW_EDRAM_CAP);

		/* NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
	} else {
		dev_priv->edram_cap = 0;
	}

	if (HAS_EDRAM(dev_priv))
		DRM_INFO("Found %lluMB of eDRAM\n",
			 intel_uncore_edram_size(dev_priv) / (1024 * 1024));
}

static bool
fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 dbg;

	dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 cer;

	cer = __raw_i915_read32(dev_priv, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
		return fpga_check_for_unclaimed_mmio(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return vlv_check_for_unclaimed_mmio(dev_priv);

	return false;
}

static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
					  bool restore_forcewake)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);

	/* clear out unclaimed reg detection bit */
	if (check_for_unclaimed_mmio(dev_priv))
		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev_priv)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	/* Enable Decoupled MMIO only on BXT C stepping onwards */
	if (!IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
		info->has_decoupled_mmio = false;

	intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
}

void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
				 bool restore_forcewake)
{
	__intel_uncore_early_sanitize(dev_priv, restore_forcewake);
	i915_check_and_clear_faults(dev_priv);
}

void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
	i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);

	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_sanitize_gt_powersave(dev_priv);
}

static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
		if (domain->wake_count++)
			fw_domains &= ~domain->mask;
	}

	if (fw_domains) {
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
		dev_priv->uncore.fw_domains_active |= fw_domains;
	}
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down particular
 * forcewake domains, this function should be called at the beginning of
 * the sequence, and subsequently the reference should be dropped by a
 * symmetric call to intel_uncore_forcewake_put(). Usually the caller wants
 * all the domains to be kept awake, so @fw_domains would then be
 * FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
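
/*
 * Illustrative sketch (not part of the original file): a sequence that
 * must keep the render well awake across several raw accesses might be
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_RENDER);
 *	... I915_READ_FW()/I915_WRITE_FW() accesses ...
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_RENDER);
 *
 * with a runtime PM reference already held, as asserted above.
 */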

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(dev_priv, fw_domains);
}

static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count)
			continue;

		fw_domain_arm_timer(domain);
	}
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(dev_priv, fw_domains);
}

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->uncore.fw_domains_active);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
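
/* GT registers needing forcewake all live below 0x40000; offsets above
 * that (e.g. display) can always be accessed directly. */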

#define __gen6_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})

static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
	if (offset < entry->start)
		return -1;
	else if (offset > entry->end)
		return 1;
	else
		return 0;
}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({                                 \
	unsigned int start__ = 0, end__ = (num);                        \
	typeof(base) result__ = NULL;                                   \
	while (start__ < end__) {                                       \
		unsigned int mid__ = start__ + (end__ - start__) / 2;   \
		int ret__ = (cmp)((key), (base) + mid__);               \
		if (ret__ < 0) {                                        \
			end__ = mid__;                                  \
		} else if (ret__ > 0) {                                 \
			start__ = mid__ + 1;                            \
		} else {                                                \
			result__ = (base) + mid__;                      \
			break;                                          \
		}                                                       \
	}                                                               \
	result__;                                                       \
})
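
/*
 * BSEARCH() is used below with fw_range_cmp() to map a register offset
 * to its forcewake domains and with mmio_reg_cmp() to test for shadowed
 * registers; both tables must therefore stay sorted, which the *_check()
 * helpers verify on debug builds.
 */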

static enum forcewake_domains
find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
{
	const struct intel_forcewake_range *entry;

	entry = BSEARCH(offset,
			dev_priv->uncore.fw_domains_table,
			dev_priv->uncore.fw_domains_table_entries,
			fw_range_cmp);

	return entry ? entry->domains : 0;
}

static void
intel_fw_table_check(struct drm_i915_private *dev_priv)
{
	const struct intel_forcewake_range *ranges;
	unsigned int num_ranges;
	s32 prev;
	unsigned int i;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		return;

	ranges = dev_priv->uncore.fw_domains_table;
	if (!ranges)
		return;

	num_ranges = dev_priv->uncore.fw_domains_table_entries;

	for (i = 0, prev = -1; i < num_ranges; i++, ranges++) {
		WARN_ON_ONCE(prev >= (s32)ranges->start);
		prev = ranges->start;
		WARN_ON_ONCE(prev >= (s32)ranges->end);
		prev = ranges->end;
	}
}

#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

#define HAS_FWTABLE(dev_priv) \
	(IS_GEN9(dev_priv) || \
	 IS_CHERRYVIEW(dev_priv) || \
	 IS_VALLEYVIEW(dev_priv))

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})

/* *Must* be sorted by offset! See intel_shadow_table_check(). */
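/*
 * Writes to these registers are shadowed by the hardware, i.e. the
 * value is latched even while the well is powered down, so gen8 writes
 * can skip forcewake for them.
 */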
static const i915_reg_t gen8_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),	/* 0x2000 (base) */
	GEN6_RPNSWREQ,			/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,		/* 0xA00C */
	RING_TAIL(GEN6_BSD_RING_BASE),	/* 0x12000 (base) */
	RING_TAIL(VEBOX_RING_BASE),	/* 0x1a000 (base) */
	RING_TAIL(BLT_RING_BASE),	/* 0x22000 (base) */
	/* TODO: Other registers are not yet used */
};

static void intel_shadow_table_check(void)
{
	const i915_reg_t *reg = gen8_shadowed_regs;
	s32 prev;
	u32 offset;
	unsigned int i;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		return;

	for (i = 0, prev = -1; i < ARRAY_SIZE(gen8_shadowed_regs); i++, reg++) {
		offset = i915_mmio_reg_offset(*reg);
		WARN_ON_ONCE(prev >= (s32)offset);
		prev = offset;
	}
}

static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
	u32 offset = i915_mmio_reg_offset(*reg);

	if (key < offset)
		return -1;
	else if (key > offset)
		return 1;
	else
		return 0;
}

static bool is_gen8_shadowed(u32 offset)
{
	const i915_reg_t *regs = gen8_shadowed_regs;

	return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs),
		       mmio_reg_cmp);
}

#define __gen8_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		      const i915_reg_t reg,
		      const bool read,
		      const bool before)
{
	if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
		 "Unclaimed %s register 0x%x\n",
		 read ? "read from" : "write to",
		 i915_mmio_reg_offset(reg)))
		i915.mmio_debug--; /* Only report the first N failures */
}

static inline void
unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		    const i915_reg_t reg,
		    const bool read,
		    const bool before)
{
	if (likely(!i915.mmio_debug))
		return;

	__unclaimed_reg_debug(dev_priv, reg, read, before);
}

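/*
 * Gen9 "decoupled" MMIO: rather than waking a power well for a single
 * access, the target offset, power domain and read/write opcode are
 * packed into GEN9_DECOUPLED_REG0_DW1 with the GO bit set; the hardware
 * performs the access itself, passing the data through
 * GEN9_DECOUPLED_REG0_DW0. The helpers below implement this handshake.
 */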
static const enum decoupled_power_domain fw2dpd_domain[] = {
	GEN9_DECOUPLED_PD_RENDER,
	GEN9_DECOUPLED_PD_BLITTER,
	GEN9_DECOUPLED_PD_ALL,
	GEN9_DECOUPLED_PD_MEDIA,
	GEN9_DECOUPLED_PD_ALL,
	GEN9_DECOUPLED_PD_ALL,
	GEN9_DECOUPLED_PD_ALL
};

/*
 * Decoupled MMIO access for only 1 DWORD
 */
static void __gen9_decoupled_mmio_access(struct drm_i915_private *dev_priv,
					 u32 reg,
					 enum forcewake_domains fw_domain,
					 enum decoupled_ops operation)
{
	enum decoupled_power_domain dp_domain;
	u32 ctrl_reg_data = 0;

	dp_domain = fw2dpd_domain[fw_domain - 1];

	ctrl_reg_data |= reg;
	ctrl_reg_data |= (operation << GEN9_DECOUPLED_OP_SHIFT);
	ctrl_reg_data |= (dp_domain << GEN9_DECOUPLED_PD_SHIFT);
	ctrl_reg_data |= GEN9_DECOUPLED_DW1_GO;
	__raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW1, ctrl_reg_data);

	if (wait_for_atomic((__raw_i915_read32(dev_priv,
			    GEN9_DECOUPLED_REG0_DW1) &
			    GEN9_DECOUPLED_DW1_GO) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Decoupled MMIO wait timed out\n");
}

static inline u32
__gen9_decoupled_mmio_read32(struct drm_i915_private *dev_priv,
			     u32 reg,
			     enum forcewake_domains fw_domain)
{
	__gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
				     GEN9_DECOUPLED_OP_READ);

	return __raw_i915_read32(dev_priv, GEN9_DECOUPLED_REG0_DW0);
}

static inline void
__gen9_decoupled_mmio_write(struct drm_i915_private *dev_priv,
			    u32 reg, u32 data,
			    enum forcewake_domains fw_domain)
{

	__raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW0, data);

	__gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
				     GEN9_DECOUPLED_OP_WRITE);
}


#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, true, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

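/*
 * Auto-forcewake for the register accessors: wake only the requested
 * domains that are supported and not already active, and arm each one's
 * release timer so the reference is dropped again ~1ms after the access.
 */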
static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv)
		fw_domain_arm_timer(domain);

	dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
	dev_priv->uncore.fw_domains_active |= fw_domains;
}

static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	if (WARN_ON(!fw_domains))
		return;

	/* Turn on all requested but inactive supported forcewake domains. */
	fw_domains &= dev_priv->uncore.fw_domains;
	fw_domains &= ~dev_priv->uncore.fw_domains_active;

	if (fw_domains)
		___force_wake_auto(dev_priv, fw_domains);
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __gen6_reg_read_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __fwtable_read(x) \
static u##x \
fwtable_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __fwtable_reg_read_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __gen9_decoupled_read(x) \
static u##x \
gen9_decoupled_read##x(struct drm_i915_private *dev_priv, \
		       i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __fwtable_reg_read_fw_domains(offset); \
	if (fw_engine & ~dev_priv->uncore.fw_domains_active) { \
		unsigned i; \
		u32 *ptr_data = (u32 *) &val; \
		for (i = 0; i < x/32; i++, offset += sizeof(u32), ptr_data++) \
			*ptr_data = __gen9_decoupled_mmio_read32(dev_priv, \
								 offset, \
								 fw_engine); \
	} else { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} \
	GEN6_READ_FOOTER; \
}

__gen9_decoupled_read(32)
__gen9_decoupled_read(64)
__fwtable_read(8)
__fwtable_read(16)
__fwtable_read(32)
__fwtable_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __fwtable_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define VGPU_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define VGPU_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	VGPU_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	VGPU_READ_FOOTER; \
}

__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)

#undef __vgpu_read
#undef VGPU_READ_FOOTER
#undef VGPU_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, false, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __gen8_reg_write_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

#define __fwtable_write(x) \
static void \
fwtable_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __fwtable_reg_write_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

#define __gen9_decoupled_write(x) \
static void \
gen9_decoupled_write##x(struct drm_i915_private *dev_priv, \
			i915_reg_t reg, u##x val, \
		bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __fwtable_reg_write_fw_domains(offset); \
	if (fw_engine & ~dev_priv->uncore.fw_domains_active) \
		__gen9_decoupled_mmio_write(dev_priv, \
					    offset, \
					    val, \
					    fw_engine); \
	else \
		__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

__gen9_decoupled_write(32)
__fwtable_write(8)
__fwtable_write(16)
__fwtable_write(32)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)

#undef __fwtable_write
#undef __gen8_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define VGPU_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define VGPU_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __vgpu_write(x) \
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
			  i915_reg_t reg, u##x val, bool trace) { \
	VGPU_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	VGPU_WRITE_FOOTER; \
}

__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)

#undef __vgpu_write
#undef VGPU_WRITE_FOOTER
#undef VGPU_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)

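/*
 * Describe one forcewake domain: which set/ack registers it uses and
 * the values written to request, release and reset it (multi-threaded
 * forcewake uses masked bits; plain gen6 forcewake writes the kernel
 * bit directly).
 */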

static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   i915_reg_t reg_set,
			   i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	WARN_ON(d->wake_count);

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	if (IS_GEN6(dev_priv)) {
		d->val_reset = 0;
		d->val_set = FORCEWAKE_KERNEL;
		d->val_clear = 0;
	} else {
		/* WaRsClearFWBitsAtReset:bdw,skl */
		d->val_reset = _MASKED_BIT_DISABLE(0xffff);
		d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		d->reg_post = FORCEWAKE_ACK_VLV;
	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
		d->reg_post = ECOBUS;

	d->i915 = dev_priv;
	d->id = domain_id;

	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));

	d->mask = 1 << domain_id;

	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = intel_uncore_fw_release_timer;

	dev_priv->uncore.fw_domains |= (1 << domain_id);

	fw_domain_reset(d);
}

static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv)->gen <= 5)
		return;

	if (IS_GEN9(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		if (!IS_CHERRYVIEW(dev_priv))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		if (IS_HASWELL(dev_priv))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev_priv)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. At this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		spin_lock_irq(&dev_priv->uncore.lock);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
		spin_unlock_irq(&dev_priv->uncore.lock);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}

#define ASSIGN_FW_DOMAINS_TABLE(d) \
{ \
	dev_priv->uncore.fw_domains_table = \
			(struct intel_forcewake_range *)(d); \
	dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
}

void intel_uncore_init(struct drm_i915_private *dev_priv)
{
	i915_check_vgpu(dev_priv);

	intel_uncore_edram_detect(dev_priv);
	intel_uncore_fw_domains_init(dev_priv);
	__intel_uncore_early_sanitize(dev_priv, false);

	dev_priv->uncore.unclaimed_mmio_check = 1;

	switch (INTEL_INFO(dev_priv)->gen) {
	default:
	case 9:
		ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
		ASSIGN_READ_MMIO_VFUNCS(fwtable);
		if (HAS_DECOUPLED_MMIO(dev_priv)) {
			dev_priv->uncore.funcs.mmio_readl =
						gen9_decoupled_read32;
			dev_priv->uncore.funcs.mmio_readq =
						gen9_decoupled_read64;
			dev_priv->uncore.funcs.mmio_writel =
						gen9_decoupled_write32;
		}
		break;
	case 8:
		if (IS_CHERRYVIEW(dev_priv)) {
			ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
			ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
			ASSIGN_READ_MMIO_VFUNCS(fwtable);

		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		ASSIGN_WRITE_MMIO_VFUNCS(gen6);

		if (IS_VALLEYVIEW(dev_priv)) {
			ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
			ASSIGN_READ_MMIO_VFUNCS(fwtable);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
		ASSIGN_READ_MMIO_VFUNCS(gen2);
		break;
	}

	intel_fw_table_check(dev_priv);
	if (INTEL_GEN(dev_priv) >= 8)
		intel_shadow_table_check();

	if (intel_vgpu_active(dev_priv)) {
		ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
		ASSIGN_READ_MMIO_VFUNCS(vgpu);
	}

	i915_check_and_clear_faults(dev_priv);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS

void intel_uncore_fini(struct drm_i915_private *dev_priv)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev_priv);
	intel_uncore_forcewake_reset(dev_priv, false);
}

#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1)

static const struct register_whitelist {
	i915_reg_t offset_ldw, offset_udw;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	  .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	  .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
};

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	unsigned size;
	i915_reg_t offset_ldw, offset_udw;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
		    (INTEL_INFO(dev)->gen_mask & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	/* We use the low bits to encode extra flags as the register should
	 * be naturally aligned (and those that are not so aligned merely
	 * limit the available flags for that register).
	 */
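	/*
	 * For example (illustrative): with the 8-byte RING_TIMESTAMP entry
	 * at 0x2358, a userspace offset of 0x2359 yields size = 8 | 1 below
	 * and selects the 2x32 read path.
	 */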
	offset_ldw = entry->offset_ldw;
	offset_udw = entry->offset_udw;
	size = entry->size;
	size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);

	intel_runtime_pm_get(dev_priv);

	switch (size) {
	case 8 | 1:
		reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
		break;
	case 8:
		reg->val = I915_READ64(offset_ldw);
		break;
	case 4:
		reg->val = I915_READ(offset_ldw);
		break;
	case 2:
		reg->val = I915_READ16(offset_ldw);
		break;
	case 1:
		reg->val = I915_READ8(offset_ldw);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

static int i915_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;
	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/* assert reset for at least 20 usec */
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(20);
	pci_write_config_byte(pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(pdev), 500);
}

static int g4x_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;
	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(pdev), 500);
}

static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(pdev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(pdev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_i915_private *dev_priv,
			     unsigned engine_mask)
{
	int ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = intel_wait_for_register(dev_priv,
				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
				      500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = intel_wait_for_register(dev_priv,
				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
				      500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR, 0);

	return 0;
}

/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
				u32 hw_domain_mask)
{
	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);

	/* Spin waiting for the device to ack the reset requests */
	return intel_wait_for_register_fw(dev_priv,
					  GEN6_GDRST, hw_domain_mask, 0,
					  500);
}

/**
 * gen6_reset_engines - reset individual engines
 * @dev_priv: i915 device
 * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
 *
 * This function will reset the individual engines that are set in engine_mask.
 * If you provide ALL_ENGINES as mask, full global domain reset will be issued.
 *
 * Note: It is responsibility of the caller to handle the difference between
 * asking full domain reset versus reset for all available individual engines.
 *
 * Returns 0 on success, nonzero on error.
 */
static int gen6_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	const u32 hw_engine_mask[I915_NUM_ENGINES] = {
		[RCS] = GEN6_GRDOM_RENDER,
		[BCS] = GEN6_GRDOM_BLT,
		[VCS] = GEN6_GRDOM_MEDIA,
		[VCS2] = GEN8_GRDOM_MEDIA2,
		[VECS] = GEN6_GRDOM_VECS,
	};
	u32 hw_mask;
	int ret;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN6_GRDOM_FULL;
	} else {
		unsigned int tmp;

		hw_mask = 0;
		for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
			hw_mask |= hw_engine_mask[engine->id];
	}

	ret = gen6_hw_domain_reset(dev_priv, hw_mask);

	intel_uncore_forcewake_reset(dev_priv, true);

	return ret;
}

/**
 * intel_wait_for_register_fw - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @timeout_ms: timeout in millisecond
 *
 * This routine waits until the target register @reg contains the expected
1688 1689 1690 1691
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ_FW(reg) & mask) == value
 *
1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723
 * Otherwise, the wait will timeout after @timeout_ms milliseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted, it is
 * not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEOUT.
 */
int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
			       i915_reg_t reg,
			       const u32 mask,
			       const u32 value,
			       const unsigned long timeout_ms)
{
#define done ((I915_READ_FW(reg) & mask) == value)
	int ret = wait_for_us(done, 2);
	if (ret)
		ret = wait_for(done, timeout_ms);
	return ret;
#undef done
}

/**
 * intel_wait_for_register - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @timeout_ms: timeout in milliseconds
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ(reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @timeout_ms milliseconds.
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
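 *
 * Example (illustrative only; matches the ironlake reset wait earlier in
 * this file, polling until the reset-enable bit clears)::
 *
 *	err = intel_wait_for_register(dev_priv,
 *				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
 *				      500);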
 */
int intel_wait_for_register(struct drm_i915_private *dev_priv,
			    i915_reg_t reg,
			    const u32 mask,
			    const u32 value,
			    const unsigned long timeout_ms)
{
	unsigned fw =
		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
	int ret;

	intel_uncore_forcewake_get(dev_priv, fw);
	ret = wait_for_us((I915_READ_FW(reg) & mask) == value, 2);
	intel_uncore_forcewake_put(dev_priv, fw);
	if (ret)
		ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
			       timeout_ms);

	return ret;
}

static int gen8_request_engine_reset(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));

	ret = intel_wait_for_register_fw(dev_priv,
					 RING_RESET_CTL(engine->mmio_base),
					 RESET_CTL_READY_TO_RESET,
					 RESET_CTL_READY_TO_RESET,
					 700);
	if (ret)
		DRM_ERROR("%s: reset request timeout\n", engine->name);

	return ret;
}

static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}

static int gen8_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	unsigned int tmp;

	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		if (gen8_request_engine_reset(engine))
			goto not_ready;

	return gen6_reset_engines(dev_priv, engine_mask);

not_ready:
	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		gen8_unrequest_engine_reset(engine);

	return -EIO;
}

typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);

static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
{
	if (!i915.reset)
		return NULL;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return gen8_reset_engines;
	else if (INTEL_INFO(dev_priv)->gen >= 6)
		return gen6_reset_engines;
	else if (IS_GEN5(dev_priv))
		return ironlake_do_reset;
	else if (IS_G4X(dev_priv))
		return g4x_do_reset;
	else if (IS_G33(dev_priv))
		return g33_do_reset;
	else if (INTEL_INFO(dev_priv)->gen >= 3)
		return i915_do_reset;
	else
		return NULL;
}

int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	reset_func reset;
	int ret;

	reset = intel_get_gpu_reset(dev_priv);
	if (reset == NULL)
		return -ENODEV;

	/* If the power well sleeps during the reset, the reset
	 * request may be dropped and never complete (causing -EIO).
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	ret = reset(dev_priv, engine_mask);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
{
	return intel_get_gpu_reset(dev_priv) != NULL;
}

int intel_guc_reset(struct drm_i915_private *dev_priv)
{
	int ret;
	unsigned long irqflags;

	if (!HAS_GUC(dev_priv))
		return -EINVAL;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	return check_for_unclaimed_mmio(dev_priv);
}

bool
intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
{
	if (unlikely(i915.mmio_debug ||
		     dev_priv->uncore.unclaimed_mmio_check <= 0))
		return false;

	if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
		DRM_DEBUG("Unclaimed register detected, "
			  "enabling oneshot unclaimed register reporting. "
			  "Please use i915.mmio_debug=N for more information.\n");
		i915.mmio_debug++;
		dev_priv->uncore.unclaimed_mmio_check--;
		return true;
	}

	return false;
}

static enum forcewake_domains
intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
				i915_reg_t reg)
{
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (HAS_FWTABLE(dev_priv)) {
		fw_domains = __fwtable_reg_read_fw_domains(offset);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		fw_domains = __gen6_reg_read_fw_domains(offset);
	} else {
		WARN_ON(!IS_GEN(dev_priv, 2, 5));
		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	return fw_domains;
}

static enum forcewake_domains
intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
				 i915_reg_t reg)
{
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
		fw_domains = __fwtable_reg_write_fw_domains(offset);
	} else if (IS_GEN8(dev_priv)) {
		fw_domains = __gen8_reg_write_fw_domains(offset);
	} else if (IS_GEN(dev_priv, 6, 7)) {
		fw_domains = FORCEWAKE_RENDER;
	} else {
		WARN_ON(!IS_GEN(dev_priv, 2, 5));
		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	return fw_domains;
}

/**
 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
 * 				    a register
 * @dev_priv: pointer to struct drm_i915_private
 * @reg: register in question
 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
 *
 * Returns the set of forcewake domains that must be taken (with, for example,
 * intel_uncore_forcewake_get()) for the specified register to be accessible in
 * the specified mode (read, write or read/write) with raw mmio accessors.
 *
 * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER) requires
 * callers to do FIFO management on their own or risk losing writes.
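 *
 * A sketch of the typical pattern (SOME_BIT is a placeholder, not a real
 * register bit)::
 *
 *	fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
 *					    FW_REG_READ | FW_REG_WRITE);
 *	intel_uncore_forcewake_get(dev_priv, fw);
 *	I915_WRITE_FW(reg, I915_READ_FW(reg) | SOME_BIT);
 *	intel_uncore_forcewake_put(dev_priv, fw);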
 */
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
			       i915_reg_t reg, unsigned int op)
{
	enum forcewake_domains fw_domains = 0;

	WARN_ON(!op);

	if (intel_vgpu_active(dev_priv))
		return 0;

	if (op & FW_REG_READ)
		fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);

	if (op & FW_REG_WRITE)
		fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);

	return fw_domains;
}