/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drm_print.h>
#include <drm/i915_pciids.h>

#include "display/intel_cdclk.h"
#include "intel_device_info.h"
#include "i915_drv.h"

#define PLATFORM_NAME(x) [INTEL_##x] = #x
static const char * const platform_names[] = {
	PLATFORM_NAME(I830),
	PLATFORM_NAME(I845G),
	PLATFORM_NAME(I85X),
	PLATFORM_NAME(I865G),
	PLATFORM_NAME(I915G),
	PLATFORM_NAME(I915GM),
	PLATFORM_NAME(I945G),
	PLATFORM_NAME(I945GM),
	PLATFORM_NAME(G33),
	PLATFORM_NAME(PINEVIEW),
	PLATFORM_NAME(I965G),
	PLATFORM_NAME(I965GM),
	PLATFORM_NAME(G45),
	PLATFORM_NAME(GM45),
	PLATFORM_NAME(IRONLAKE),
	PLATFORM_NAME(SANDYBRIDGE),
	PLATFORM_NAME(IVYBRIDGE),
	PLATFORM_NAME(VALLEYVIEW),
	PLATFORM_NAME(HASWELL),
	PLATFORM_NAME(BROADWELL),
	PLATFORM_NAME(CHERRYVIEW),
	PLATFORM_NAME(SKYLAKE),
	PLATFORM_NAME(BROXTON),
	PLATFORM_NAME(KABYLAKE),
	PLATFORM_NAME(GEMINILAKE),
	PLATFORM_NAME(COFFEELAKE),
	PLATFORM_NAME(CANNONLAKE),
	PLATFORM_NAME(ICELAKE),
	PLATFORM_NAME(ELKHARTLAKE),
	PLATFORM_NAME(TIGERLAKE),
};
#undef PLATFORM_NAME

const char *intel_platform_name(enum intel_platform platform)
{
	BUILD_BUG_ON(ARRAY_SIZE(platform_names) != INTEL_MAX_PLATFORMS);

	if (WARN_ON_ONCE(platform >= ARRAY_SIZE(platform_names) ||
			 platform_names[platform] == NULL))
		return "<unknown>";

	return platform_names[platform];
}

static const char *iommu_name(void)
{
	const char *msg = "n/a";

#ifdef CONFIG_INTEL_IOMMU
	msg = enableddisabled(intel_iommu_gfx_mapped);
#endif

	return msg;
}

void intel_device_info_print_static(const struct intel_device_info *info,
				    struct drm_printer *p)
{
	drm_printf(p, "engines: %x\n", info->engine_mask);
	drm_printf(p, "gen: %d\n", info->gen);
	drm_printf(p, "gt: %d\n", info->gt);
	drm_printf(p, "iommu: %s\n", iommu_name());
	drm_printf(p, "memory-regions: %x\n", info->memory_regions);
	drm_printf(p, "page-sizes: %x\n", info->page_sizes);
	drm_printf(p, "platform: %s\n", intel_platform_name(info->platform));
	drm_printf(p, "ppgtt-size: %d\n", info->ppgtt_size);
	drm_printf(p, "ppgtt-type: %d\n", info->ppgtt_type);
	drm_printf(p, "dma_mask_size: %u\n", info->dma_mask_size);

#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG

#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->display.name));
	DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
}

static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
{
	int s;

	drm_printf(p, "slice total: %u, mask=%04x\n",
		   hweight8(sseu->slice_mask), sseu->slice_mask);
	drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu));
	for (s = 0; s < sseu->max_slices; s++) {
		drm_printf(p, "slice%d: %u subslices, mask=%08x\n",
			   s, intel_sseu_subslices_per_slice(sseu, s),
			   intel_sseu_get_subslices(sseu, s));
	}
	drm_printf(p, "EU total: %u\n", sseu->eu_total);
	drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
	drm_printf(p, "has slice power gating: %s\n",
		   yesno(sseu->has_slice_pg));
	drm_printf(p, "has subslice power gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
}

void intel_device_info_print_runtime(const struct intel_runtime_info *info,
				     struct drm_printer *p)
{
	sseu_dump(&info->sseu, p);

	drm_printf(p, "rawclk rate: %u kHz\n", info->rawclk_freq);
	drm_printf(p, "CS timestamp frequency: %u Hz\n",
		   info->cs_timestamp_frequency_hz);
}

static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice,
		       int subslice)
{
	int slice_stride = sseu->max_subslices * sseu->eu_stride;

	return slice * slice_stride + subslice * sseu->eu_stride;
}

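/*
 * sseu->eu_mask stores the per-subslice EU masks as a packed byte array,
 * eu_stride bytes per (slice, subslice) pair; the helpers below gather and
 * scatter a 16-bit EU mask one byte at a time.
 */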
static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
			int subslice)
{
	int i, offset = sseu_eu_idx(sseu, slice, subslice);
	u16 eu_mask = 0;

	for (i = 0; i < sseu->eu_stride; i++) {
		eu_mask |= ((u16)sseu->eu_mask[offset + i]) <<
			(i * BITS_PER_BYTE);
	}

	return eu_mask;
}

static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
			 u16 eu_mask)
{
	int i, offset = sseu_eu_idx(sseu, slice, subslice);

	for (i = 0; i < sseu->eu_stride; i++) {
		sseu->eu_mask[offset + i] =
			(eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
	}
}

void intel_device_info_print_topology(const struct sseu_dev_info *sseu,
				      struct drm_printer *p)
{
	int s, ss;

	if (sseu->max_slices == 0) {
		drm_printf(p, "Unavailable\n");
		return;
	}

	for (s = 0; s < sseu->max_slices; s++) {
		drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n",
			   s, intel_sseu_subslices_per_slice(sseu, s),
			   intel_sseu_get_subslices(sseu, s));

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			u16 enabled_eus = sseu_get_eus(sseu, s, ss);

			drm_printf(p, "\tsubslice%d: %u EUs (0x%hx)\n",
				   ss, hweight16(enabled_eus), enabled_eus);
		}
	}
}

static u16 compute_eu_total(const struct sseu_dev_info *sseu)
{
	u16 i, total = 0;

	for (i = 0; i < ARRAY_SIZE(sseu->eu_mask); i++)
		total += hweight8(sseu->eu_mask[i]);

	return total;
}

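/*
 * Expand the fuse values into the runtime topology: s_en is the bitmask of
 * enabled slices, ss_en the subslice mask applied to every enabled slice,
 * and eu_en the EU mask applied to every enabled subslice.
 */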
static void gen11_compute_sseu_info(struct sseu_dev_info *sseu,
				    u8 s_en, u32 ss_en, u16 eu_en)
{
	int s, ss;

	/* ss_en represents entire subslice mask across all slices */
	GEM_BUG_ON(sseu->max_slices * sseu->max_subslices >
		   sizeof(ss_en) * BITS_PER_BYTE);

	for (s = 0; s < sseu->max_slices; s++) {
		if ((s_en & BIT(s)) == 0)
			continue;

		sseu->slice_mask |= BIT(s);

		intel_sseu_set_subslices(sseu, s, ss_en);

		for (ss = 0; ss < sseu->max_subslices; ss++)
			if (intel_sseu_has_subslice(sseu, s, ss))
				sseu_set_eus(sseu, s, ss, eu_en);
	}
	sseu->eu_per_subslice = hweight16(eu_en);
	sseu->eu_total = compute_eu_total(sseu);
}

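/*
 * Gen12 reuses the gen11 slice-enable and EU-disable fuses but reports
 * dual-subslices via GEN12_GT_DSS_ENABLE; each EU fuse bit covers a pair
 * of EUs and is expanded before handing off to gen11_compute_sseu_info().
 */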
static void gen12_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u8 s_en;
	u32 dss_en;
	u16 eu_en = 0;
	u8 eu_en_fuse;
	int eu;

	/*
	 * Gen12 has Dual-Subslices, which behave similarly to 2 gen11 SS.
	 * Instead of splitting these, provide userspace with an array
	 * of DSS to more closely represent the hardware resource.
	 */
	intel_sseu_set_info(sseu, 1, 6, 16);

	s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;

	dss_en = I915_READ(GEN12_GT_DSS_ENABLE);

	/* one bit per pair of EUs */
	eu_en_fuse = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK);
	for (eu = 0; eu < sseu->max_eus_per_subslice / 2; eu++)
		if (eu_en_fuse & BIT(eu))
			eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1);

	gen11_compute_sseu_info(sseu, s_en, dss_en, eu_en);

	/* TGL only supports slice-level power gating */
	sseu->has_slice_pg = 1;
}

static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u8 s_en;
	u32 ss_en;
	u8 eu_en;

	if (IS_ELKHARTLAKE(dev_priv))
		intel_sseu_set_info(sseu, 1, 4, 8);
	else
		intel_sseu_set_info(sseu, 1, 8, 8);

	s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
	ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE);
	eu_en = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK);

	gen11_compute_sseu_info(sseu, s_en, ss_en, eu_en);

	/* ICL has no power gating restrictions. */
	sseu->has_slice_pg = 1;
	sseu->has_subslice_pg = 1;
	sseu->has_eu_pg = 1;
}

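/*
 * On gen10 the EU disable fuses for the six slices are packed back to back
 * across GEN8_EU_DISABLE0..2 and GEN10_EU_DISABLE3, hence the per-slice
 * shifting below; a subslice only counts as enabled if it has EUs left.
 */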
static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	const u32 fuse2 = I915_READ(GEN8_FUSE2);
	int s, ss;
	const int eu_mask = 0xff;
	u32 subslice_mask, eu_en;

	intel_sseu_set_info(sseu, 6, 4, 8);

	sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >>
			    GEN10_F2_S_ENA_SHIFT;

	/* Slice0 */
	eu_en = ~I915_READ(GEN8_EU_DISABLE0);
	for (ss = 0; ss < sseu->max_subslices; ss++)
		sseu_set_eus(sseu, 0, ss, (eu_en >> (8 * ss)) & eu_mask);
	/* Slice1 */
	sseu_set_eus(sseu, 1, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN8_EU_DISABLE1);
	sseu_set_eus(sseu, 1, 1, eu_en & eu_mask);
	/* Slice2 */
	sseu_set_eus(sseu, 2, 0, (eu_en >> 8) & eu_mask);
	sseu_set_eus(sseu, 2, 1, (eu_en >> 16) & eu_mask);
	/* Slice3 */
	sseu_set_eus(sseu, 3, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN8_EU_DISABLE2);
	sseu_set_eus(sseu, 3, 1, eu_en & eu_mask);
	/* Slice4 */
	sseu_set_eus(sseu, 4, 0, (eu_en >> 8) & eu_mask);
	sseu_set_eus(sseu, 4, 1, (eu_en >> 16) & eu_mask);
	/* Slice5 */
	sseu_set_eus(sseu, 5, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN10_EU_DISABLE3);
	sseu_set_eus(sseu, 5, 1, eu_en & eu_mask);

	subslice_mask = (1 << 4) - 1;
	subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
			   GEN10_F2_SS_DIS_SHIFT);

	for (s = 0; s < sseu->max_slices; s++) {
		u32 subslice_mask_with_eus = subslice_mask;

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			if (sseu_get_eus(sseu, s, ss) == 0)
				subslice_mask_with_eus &= ~BIT(ss);
		}

		/*
		 * Slice0 can have up to 3 subslices, but there are only 2 in
		 * slice1/2.
		 */
		intel_sseu_set_subslices(sseu, s, s == 0 ?
						  subslice_mask_with_eus :
						  subslice_mask_with_eus & 0x3);
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * CNL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     intel_sseu_subslice_total(sseu)) :
				0;

	/* No restrictions on Power Gating */
	sseu->has_slice_pg = 1;
	sseu->has_subslice_pg = 1;
	sseu->has_eu_pg = 1;
}

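/*
 * CHV is a single slice with up to two subslices; the EU disable bits for
 * each subslice are split across two fuse fields (R0/R1) that are stitched
 * back together here.
 */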
static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u32 fuse;
	u8 subslice_mask = 0;

	fuse = I915_READ(CHV_FUSE_GT);

	sseu->slice_mask = BIT(0);
	intel_sseu_set_info(sseu, 1, 2, 8);

	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
		u8 disabled_mask =
			((fuse & CHV_FGT_EU_DIS_SS0_R0_MASK) >>
			 CHV_FGT_EU_DIS_SS0_R0_SHIFT) |
			(((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
			  CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);

		subslice_mask |= BIT(0);
		sseu_set_eus(sseu, 0, 0, ~disabled_mask);
	}

	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
390 391 392 393 394 395
		u8 disabled_mask =
			((fuse & CHV_FGT_EU_DIS_SS1_R0_MASK) >>
			 CHV_FGT_EU_DIS_SS1_R0_SHIFT) |
			(((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
			  CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);

		subslice_mask |= BIT(1);
		sseu_set_eus(sseu, 0, 1, ~disabled_mask);
	}

	intel_sseu_set_subslices(sseu, 0, subslice_mask);

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * CHV expected to always have a uniform distribution of EU
	 * across subslices.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				sseu->eu_total /
					intel_sseu_subslice_total(sseu) :
				0;
	/*
	 * CHV supports subslice power gating on devices with more than
	 * one subslice, and supports EU power gating on devices with
	 * more than one EU pair per subslice.
	 */
	sseu->has_slice_pg = 0;
	sseu->has_subslice_pg = intel_sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
}

static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	int s, ss;
	u32 fuse2, eu_disable, subslice_mask;
	const u8 eu_mask = 0xff;

	fuse2 = I915_READ(GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;

	/* BXT has a single slice and at most 3 subslices. */
	intel_sseu_set_info(sseu, IS_GEN9_LP(dev_priv) ? 1 : 3,
			    IS_GEN9_LP(dev_priv) ? 3 : 4, 8);

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	subslice_mask = (1 << sseu->max_subslices) - 1;
	subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
			   GEN9_F2_SS_DIS_SHIFT);

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		intel_sseu_set_subslices(sseu, s, subslice_mask);

		eu_disable = I915_READ(GEN9_EU_DISABLE(s));
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			int eu_per_ss;
			u8 eu_disabled_mask;

			if (!intel_sseu_has_subslice(sseu, s, ss))
				/* skip disabled subslice */
				continue;

			eu_disabled_mask = (eu_disable >> (ss * 8)) & eu_mask;

			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);

			eu_per_ss = sseu->max_eus_per_subslice -
				hweight8(eu_disabled_mask);

			/*
			 * Record which subslice(s) has(have) 7 EUs. We
			 * can tune the hash used to spread work among
			 * subslices if they are unbalanced.
			 */
			if (eu_per_ss == 7)
				sseu->subslice_7eu[s] |= BIT(ss);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * SKL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery. BXT is expected to be perfectly uniform in EU
	 * distribution.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     intel_sseu_subslice_total(sseu)) :
				0;
	/*
	 * SKL+ supports slice power gating on devices with more than
	 * one slice, and supports EU power gating on devices with
	 * more than one EU pair per subslice. BXT+ supports subslice
	 * power gating on devices with more than one subslice, and
	 * supports EU power gating on devices with more than one EU
	 * pair per subslice.
	 */
	sseu->has_slice_pg =
		!IS_GEN9_LP(dev_priv) && hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg =
		IS_GEN9_LP(dev_priv) && intel_sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = sseu->eu_per_subslice > 2;

	if (IS_GEN9_LP(dev_priv)) {
#define IS_SS_DISABLED(ss)	(!(sseu->subslice_mask[0] & BIT(ss)))
		info->has_pooled_eu = hweight8(sseu->subslice_mask[0]) == 3;

		sseu->min_eu_in_pool = 0;
		if (info->has_pooled_eu) {
			if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
				sseu->min_eu_in_pool = 3;
			else if (IS_SS_DISABLED(1))
				sseu->min_eu_in_pool = 6;
			else
				sseu->min_eu_in_pool = 9;
		}
#undef IS_SS_DISABLED
	}
}

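/*
 * BDW: the per-slice EU disable fuses span the GEN8_EU_DISABLE0..2
 * registers; slices 1 and 2 straddle a 32-bit boundary and are stitched
 * together from two reads before decoding.
 */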
static void bdw_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	int s, ss;
	u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */

	fuse2 = I915_READ(GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
	intel_sseu_set_info(sseu, 3, 3, 8);

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	subslice_mask = GENMASK(sseu->max_subslices - 1, 0);
	subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
			   GEN8_F2_SS_DIS_SHIFT);

	eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
	eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
			 (32 - GEN8_EU_DIS0_S1_SHIFT));
	eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
			 (32 - GEN8_EU_DIS1_S2_SHIFT));

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		intel_sseu_set_subslices(sseu, s, subslice_mask);

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			u8 eu_disabled_mask;
			u32 n_disabled;

			if (!intel_sseu_has_subslice(sseu, s, ss))
				/* skip disabled subslice */
				continue;

			eu_disabled_mask =
				eu_disable[s] >> (ss * sseu->max_eus_per_subslice);

			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);

			n_disabled = hweight8(eu_disabled_mask);

			/*
			 * Record which subslices have 7 EUs.
			 */
			if (sseu->max_eus_per_subslice - n_disabled == 7)
				sseu->subslice_7eu[s] |= 1 << ss;
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * BDW is expected to always have a uniform distribution of EU across
	 * subslices with the exception that any one EU in any one subslice may
	 * be fused off for die recovery.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     intel_sseu_subslice_total(sseu)) :
				0;

	/*
	 * BDW supports slice power gating on devices with more than
	 * one slice.
	 */
	sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg = 0;
	sseu->has_eu_pg = 0;
}

static void hsw_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u32 fuse1;
	u8 subslice_mask = 0;
	int s, ss;

	/*
	 * There isn't a register to tell us how many slices/subslices. We
	 * work off the PCI-ids here.
	 */
	switch (INTEL_INFO(dev_priv)->gt) {
	default:
		MISSING_CASE(INTEL_INFO(dev_priv)->gt);
		/* fall through */
	case 1:
		sseu->slice_mask = BIT(0);
		subslice_mask = BIT(0);
		break;
	case 2:
		sseu->slice_mask = BIT(0);
		subslice_mask = BIT(0) | BIT(1);
		break;
	case 3:
		sseu->slice_mask = BIT(0) | BIT(1);
		subslice_mask = BIT(0) | BIT(1);
		break;
	}

	fuse1 = I915_READ(HSW_PAVP_FUSE1);
	switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) {
	default:
		MISSING_CASE((fuse1 & HSW_F1_EU_DIS_MASK) >>
			     HSW_F1_EU_DIS_SHIFT);
		/* fall through */
	case HSW_F1_EU_DIS_10EUS:
		sseu->eu_per_subslice = 10;
		break;
	case HSW_F1_EU_DIS_8EUS:
		sseu->eu_per_subslice = 8;
		break;
	case HSW_F1_EU_DIS_6EUS:
		sseu->eu_per_subslice = 6;
		break;
	}

	intel_sseu_set_info(sseu, hweight8(sseu->slice_mask),
			    hweight8(subslice_mask),
			    sseu->eu_per_subslice);

	for (s = 0; s < sseu->max_slices; s++) {
		intel_sseu_set_subslices(sseu, s, subslice_mask);

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			sseu_set_eus(sseu, s, ss,
				     (1UL << sseu->eu_per_subslice) - 1);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/* No powergating for you. */
	sseu->has_slice_pg = 0;
	sseu->has_subslice_pg = 0;
	sseu->has_eu_pg = 0;
}

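/*
 * Decode GEN9_TIMESTAMP_OVERRIDE: the divider field encodes the whole-MHz
 * part of the reference frequency and the denominator field a fractional
 * part; the result is returned in Hz.
 */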
static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
{
	u32 ts_override = I915_READ(GEN9_TIMESTAMP_OVERRIDE);
	u32 base_freq, frac_freq;

	base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
	base_freq *= 1000000;

	frac_freq = ((ts_override &
		      GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
	frac_freq = 1000000 / (frac_freq + 1);

	return base_freq + frac_freq;
}

static u32 gen10_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
					u32 rpm_config_reg)
{
	u32 f19_2_mhz = 19200000;
	u32 f24_mhz = 24000000;
	u32 crystal_clock = (rpm_config_reg &
			     GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
			    GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;

	switch (crystal_clock) {
	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
		return f19_2_mhz;
	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
		return f24_mhz;
	default:
		MISSING_CASE(crystal_clock);
		return 0;
	}
}

static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
					u32 rpm_config_reg)
{
	u32 f19_2_mhz = 19200000;
	u32 f24_mhz = 24000000;
	u32 f25_mhz = 25000000;
	u32 f38_4_mhz = 38400000;
	u32 crystal_clock = (rpm_config_reg &
			     GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
			    GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;

	switch (crystal_clock) {
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
		return f24_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
		return f19_2_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
		return f38_4_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
		return f25_mhz;
	default:
		MISSING_CASE(crystal_clock);
		return 0;
	}
}

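/*
 * The command streamer timestamp frequency is derived differently per
 * generation: from rawclk on gen4 and earlier, a fixed 12.5 MHz on gen5-8,
 * and from a CTC_MODE-selected reference or crystal clock from gen9 on.
 */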
static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
{
	u32 f12_5_mhz = 12500000;
	u32 f19_2_mhz = 19200000;
	u32 f24_mhz = 24000000;

	if (INTEL_GEN(dev_priv) <= 4) {
		/* PRMs say:
		 *
		 *     "The value in this register increments once every 16
		 *      hclks." (through the “Clocking Configuration”
		 *      (“CLKCFG”) MCHBAR register)
		 */
		return RUNTIME_INFO(dev_priv)->rawclk_freq * 1000 / 16;
	} else if (INTEL_GEN(dev_priv) <= 8) {
		/* PRMs say:
		 *
		 *     "The PCU TSC counts 10ns increments; this timestamp
		 *      reflects bits 38:3 of the TSC (i.e. 80ns granularity,
		 *      rolling over every 1.5 hours).
		 */
		return f12_5_mhz;
	} else if (INTEL_GEN(dev_priv) <= 9) {
		u32 ctc_reg = I915_READ(CTC_MODE);
		u32 freq = 0;

		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
			freq = read_reference_ts_freq(dev_priv);
		} else {
			freq = IS_GEN9_LP(dev_priv) ? f19_2_mhz : f24_mhz;

			/* Now figure out how the command stream's timestamp
			 * register increments from this frequency (it might
			 * increment only every few clock cycle).
			 */
			freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
				      CTC_SHIFT_PARAMETER_SHIFT);
		}

		return freq;
	} else if (INTEL_GEN(dev_priv) <= 12) {
		u32 ctc_reg = I915_READ(CTC_MODE);
		u32 freq = 0;

		/* First figure out the reference frequency. There are 2 ways
		 * we can compute the frequency, either through the
		 * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
		 * tells us which one we should use.
		 */
		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
			freq = read_reference_ts_freq(dev_priv);
		} else {
			u32 rpm_config_reg = I915_READ(RPM_CONFIG0);

			if (INTEL_GEN(dev_priv) <= 10)
				freq = gen10_get_crystal_clock_freq(dev_priv,
								rpm_config_reg);
			else
				freq = gen11_get_crystal_clock_freq(dev_priv,
								rpm_config_reg);

			/* Now figure out how the command stream's timestamp
			 * register increments from this frequency (it might
			 * increment only every few clock cycle).
			 */
			freq >>= 3 - ((rpm_config_reg &
				       GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
				      GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
		}

		return freq;
	}

	MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n");
	return 0;
}

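/*
 * Redefine INTEL_VGA_DEVICE() to expand to just the bare PCI device id so
 * the subplatform tables below can be generated from the shared
 * i915_pciids.h ID lists.
 */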
#undef INTEL_VGA_DEVICE
#define INTEL_VGA_DEVICE(id, info) (id)

static const u16 subplatform_ult_ids[] = {
	INTEL_HSW_ULT_GT1_IDS(0),
	INTEL_HSW_ULT_GT2_IDS(0),
	INTEL_HSW_ULT_GT3_IDS(0),
	INTEL_BDW_ULT_GT1_IDS(0),
	INTEL_BDW_ULT_GT2_IDS(0),
	INTEL_BDW_ULT_GT3_IDS(0),
	INTEL_BDW_ULT_RSVD_IDS(0),
	INTEL_SKL_ULT_GT1_IDS(0),
	INTEL_SKL_ULT_GT2_IDS(0),
	INTEL_SKL_ULT_GT3_IDS(0),
	INTEL_KBL_ULT_GT1_IDS(0),
	INTEL_KBL_ULT_GT2_IDS(0),
	INTEL_KBL_ULT_GT3_IDS(0),
	INTEL_CFL_U_GT2_IDS(0),
	INTEL_CFL_U_GT3_IDS(0),
	INTEL_WHL_U_GT1_IDS(0),
	INTEL_WHL_U_GT2_IDS(0),
	INTEL_WHL_U_GT3_IDS(0),
	INTEL_CML_U_GT1_IDS(0),
	INTEL_CML_U_GT2_IDS(0),
};

static const u16 subplatform_ulx_ids[] = {
	INTEL_HSW_ULX_GT1_IDS(0),
	INTEL_HSW_ULX_GT2_IDS(0),
	INTEL_BDW_ULX_GT1_IDS(0),
	INTEL_BDW_ULX_GT2_IDS(0),
	INTEL_BDW_ULX_GT3_IDS(0),
	INTEL_BDW_ULX_RSVD_IDS(0),
	INTEL_SKL_ULX_GT1_IDS(0),
	INTEL_SKL_ULX_GT2_IDS(0),
	INTEL_KBL_ULX_GT1_IDS(0),
	INTEL_KBL_ULX_GT2_IDS(0),
	INTEL_AML_KBL_GT2_IDS(0),
	INTEL_AML_CFL_GT2_IDS(0),
};

static const u16 subplatform_portf_ids[] = {
	INTEL_CNL_PORT_F_IDS(0),
	INTEL_ICL_PORT_F_IDS(0),
};

static bool find_devid(u16 id, const u16 *p, unsigned int num)
{
	for (; num; num--, p++) {
		if (*p == id)
			return true;
	}

	return false;
}

void intel_device_info_subplatform_init(struct drm_i915_private *i915)
{
	const struct intel_device_info *info = INTEL_INFO(i915);
	const struct intel_runtime_info *rinfo = RUNTIME_INFO(i915);
	const unsigned int pi = __platform_mask_index(rinfo, info->platform);
	const unsigned int pb = __platform_mask_bit(rinfo, info->platform);
	u16 devid = INTEL_DEVID(i915);
	u32 mask = 0;

	/* Make sure IS_<platform> checks are working. */
	RUNTIME_INFO(i915)->platform_mask[pi] = BIT(pb);

	/* Find and mark subplatform bits based on the PCI device id. */
	if (find_devid(devid, subplatform_ult_ids,
		       ARRAY_SIZE(subplatform_ult_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_ULT);
	} else if (find_devid(devid, subplatform_ulx_ids,
			      ARRAY_SIZE(subplatform_ulx_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_ULX);
		if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
			/* ULX machines are also considered ULT. */
			mask |= BIT(INTEL_SUBPLATFORM_ULT);
		}
	} else if (find_devid(devid, subplatform_portf_ids,
			      ARRAY_SIZE(subplatform_portf_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_PORTF);
	}

	GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_BITS);

	RUNTIME_INFO(i915)->platform_mask[pi] |= mask;
}

/**
 * intel_device_info_runtime_init - initialize runtime info
 * @dev_priv: the i915 device
 *
 * Determine various intel_device_info fields at runtime.
 *
 * Use it when either:
 *   - it's judged too laborious to fill n static structures with the limit
 *     when a simple if statement does the job,
 *   - run-time checks (e.g. read fuse/strap registers) are needed.
 *
 * This function needs to be called:
 *   - after the MMIO has been set up as we are reading registers,
 *   - after the PCH has been detected,
 *   - before the first usage of the fields it can tweak.
 */
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv);
	enum pipe pipe;

	if (INTEL_GEN(dev_priv) >= 10) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_scalers[pipe] = 2;
	} else if (IS_GEN(dev_priv, 9)) {
		runtime->num_scalers[PIPE_A] = 2;
		runtime->num_scalers[PIPE_B] = 2;
		runtime->num_scalers[PIPE_C] = 1;
	}

	BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES);

	if (INTEL_GEN(dev_priv) >= 11)
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 6;
	else if (IS_GEN(dev_priv, 10) || IS_GEMINILAKE(dev_priv))
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 3;
	else if (IS_BROXTON(dev_priv)) {
		/*
		 * Skylake and Broxton currently don't expose the topmost plane as its
		 * use is exclusive with the legacy cursor and we only want to expose
		 * one of those, not both. Until we can safely expose the topmost plane
		 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
		 * we don't expose the topmost plane at all to prevent ABI breakage
		 * down the line.
		 */

		runtime->num_sprites[PIPE_A] = 2;
		runtime->num_sprites[PIPE_B] = 2;
		runtime->num_sprites[PIPE_C] = 1;
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 2;
	} else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 1;
	}

	if (HAS_DISPLAY(dev_priv) && IS_GEN_RANGE(dev_priv, 7, 8) &&
	    HAS_PCH_SPLIT(dev_priv)) {
		u32 fuse_strap = I915_READ(FUSE_STRAP);
		u32 sfuse_strap = I915_READ(SFUSE_STRAP);

		/*
		 * SFUSE_STRAP is supposed to have a bit signalling the display
		 * is fused off. Unfortunately it seems that, at least in
		 * certain cases, fused off display means that PCH display
		 * reads don't land anywhere. In that case, we read 0s.
		 *
		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
		 * should be set when taking over after the firmware.
		 */
		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
		    (HAS_PCH_CPT(dev_priv) &&
		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
			drm_info(&dev_priv->drm,
				 "Display fused off, disabling\n");
			info->pipe_mask = 0;
			info->cpu_transcoder_mask = 0;
		} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
			drm_info(&dev_priv->drm, "PipeC fused off\n");
			info->pipe_mask &= ~BIT(PIPE_C);
			info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
		}
	} else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
		u32 dfsm = I915_READ(SKL_DFSM);

		if (dfsm & SKL_DFSM_PIPE_A_DISABLE) {
			info->pipe_mask &= ~BIT(PIPE_A);
			info->cpu_transcoder_mask &= ~BIT(TRANSCODER_A);
		}
		if (dfsm & SKL_DFSM_PIPE_B_DISABLE) {
			info->pipe_mask &= ~BIT(PIPE_B);
			info->cpu_transcoder_mask &= ~BIT(TRANSCODER_B);
		}
		if (dfsm & SKL_DFSM_PIPE_C_DISABLE) {
			info->pipe_mask &= ~BIT(PIPE_C);
			info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
		}
		if (INTEL_GEN(dev_priv) >= 12 &&
		    (dfsm & TGL_DFSM_PIPE_D_DISABLE)) {
			info->pipe_mask &= ~BIT(PIPE_D);
			info->cpu_transcoder_mask &= ~BIT(TRANSCODER_D);
		}

		if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE)
			info->display.has_hdcp = 0;

		if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE)
			info->display.has_fbc = 0;

		if (INTEL_GEN(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
			info->display.has_csr = 0;

		if (INTEL_GEN(dev_priv) >= 10 &&
		    (dfsm & CNL_DFSM_DISPLAY_DSC_DISABLE))
			info->display.has_dsc = 0;
	}

	/* Initialize slice/subslice/EU info */
	if (IS_HASWELL(dev_priv))
		hsw_sseu_info_init(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		cherryview_sseu_info_init(dev_priv);
	else if (IS_BROADWELL(dev_priv))
		bdw_sseu_info_init(dev_priv);
	else if (IS_GEN(dev_priv, 9))
		gen9_sseu_info_init(dev_priv);
	else if (IS_GEN(dev_priv, 10))
		gen10_sseu_info_init(dev_priv);
	else if (IS_GEN(dev_priv, 11))
		gen11_sseu_info_init(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 12)
		gen12_sseu_info_init(dev_priv);

	if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
		drm_info(&dev_priv->drm,
			 "Disabling ppGTT for VT-d support\n");
		info->ppgtt_type = INTEL_PPGTT_NONE;
	}

	runtime->rawclk_freq = intel_read_rawclk(dev_priv);
	drm_dbg(&dev_priv->drm, "rawclk rate: %d kHz\n", runtime->rawclk_freq);

	/* Initialize command stream timestamp frequency */
	runtime->cs_timestamp_frequency_hz =
		read_timestamp_frequency(dev_priv);
	if (runtime->cs_timestamp_frequency_hz) {
		runtime->cs_timestamp_period_ns =
			div_u64(1e9, runtime->cs_timestamp_frequency_hz);
		drm_dbg(&dev_priv->drm,
			"CS timestamp wraparound in %lldms\n",
			div_u64(mul_u32_u32(runtime->cs_timestamp_period_ns,
					    S32_MAX),
				USEC_PER_SEC));
	}
}

void intel_driver_caps_print(const struct intel_driver_caps *caps,
			     struct drm_printer *p)
{
	drm_printf(p, "Has logical contexts? %s\n",
		   yesno(caps->has_logical_contexts));
	drm_printf(p, "scheduler: %x\n", caps->scheduler);
}

/*
 * Determine which engines are fused off in our particular hardware. Since the
 * fuse register is in the blitter powerwell, we need forcewake to be ready at
 * this point (but later we need to prune the forcewake domains for engines that
 * are indeed fused off).
 */
void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	unsigned int logical_vdbox = 0;
	unsigned int i;
	u32 media_fuse;
	u16 vdbox_mask;
	u16 vebox_mask;

	if (INTEL_GEN(dev_priv) < 11)
		return;

	media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);

	vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
	vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
		      GEN11_GT_VEBOX_DISABLE_SHIFT;

	for (i = 0; i < I915_MAX_VCS; i++) {
		if (!HAS_ENGINE(dev_priv, _VCS(i))) {
			vdbox_mask &= ~BIT(i);
			continue;
		}

		if (!(BIT(i) & vdbox_mask)) {
			info->engine_mask &= ~BIT(_VCS(i));
			drm_dbg(&dev_priv->drm, "vcs%u fused off\n", i);
			continue;
		}

		/*
		 * In Gen11, only even numbered logical VDBOXes are
		 * hooked up to an SFC (Scaler & Format Converter) unit.
		 * In TGL each VDBOX has access to an SFC.
		 */
		if (INTEL_GEN(dev_priv) >= 12 || logical_vdbox++ % 2 == 0)
			RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
	}
	drm_dbg(&dev_priv->drm, "vdbox enable: %04x, instances: %04lx\n",
		vdbox_mask, VDBOX_MASK(dev_priv));
	GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv));

	for (i = 0; i < I915_MAX_VECS; i++) {
		if (!HAS_ENGINE(dev_priv, _VECS(i))) {
			vebox_mask &= ~BIT(i);
			continue;
		}

		if (!(BIT(i) & vebox_mask)) {
			info->engine_mask &= ~BIT(_VECS(i));
			drm_dbg(&dev_priv->drm, "vecs%u fused off\n", i);
		}
	}
	drm_dbg(&dev_priv->drm, "vebox enable: %04x, instances: %04lx\n",
		vebox_mask, VEBOX_MASK(dev_priv));
	GEM_BUG_ON(vebox_mask != VEBOX_MASK(dev_priv));
}