/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drm_print.h>

#include "intel_device_info.h"
#include "i915_drv.h"

#define PLATFORM_NAME(x) [INTEL_##x] = #x
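/*
 * Each entry expands to a designated initializer keyed by the platform
 * enum, e.g. PLATFORM_NAME(SKYLAKE) becomes [INTEL_SKYLAKE] = "SKYLAKE".
 */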
static const char * const platform_names[] = {
	PLATFORM_NAME(I830),
	PLATFORM_NAME(I845G),
	PLATFORM_NAME(I85X),
	PLATFORM_NAME(I865G),
	PLATFORM_NAME(I915G),
	PLATFORM_NAME(I915GM),
	PLATFORM_NAME(I945G),
	PLATFORM_NAME(I945GM),
	PLATFORM_NAME(G33),
	PLATFORM_NAME(PINEVIEW),
	PLATFORM_NAME(I965G),
	PLATFORM_NAME(I965GM),
	PLATFORM_NAME(G45),
	PLATFORM_NAME(GM45),
	PLATFORM_NAME(IRONLAKE),
	PLATFORM_NAME(SANDYBRIDGE),
	PLATFORM_NAME(IVYBRIDGE),
	PLATFORM_NAME(VALLEYVIEW),
	PLATFORM_NAME(HASWELL),
	PLATFORM_NAME(BROADWELL),
	PLATFORM_NAME(CHERRYVIEW),
	PLATFORM_NAME(SKYLAKE),
	PLATFORM_NAME(BROXTON),
	PLATFORM_NAME(KABYLAKE),
	PLATFORM_NAME(GEMINILAKE),
	PLATFORM_NAME(COFFEELAKE),
	PLATFORM_NAME(CANNONLAKE),
	PLATFORM_NAME(ICELAKE),
	PLATFORM_NAME(ELKHARTLAKE),
	PLATFORM_NAME(TIGERLAKE),
};
#undef PLATFORM_NAME

const char *intel_platform_name(enum intel_platform platform)
{
	BUILD_BUG_ON(ARRAY_SIZE(platform_names) != INTEL_MAX_PLATFORMS);

	if (WARN_ON_ONCE(platform >= ARRAY_SIZE(platform_names) ||
			 platform_names[platform] == NULL))
		return "<unknown>";

	return platform_names[platform];
}

static const char *iommu_name(void)
{
	const char *msg = "n/a";

#ifdef CONFIG_INTEL_IOMMU
	msg = enableddisabled(intel_iommu_gfx_mapped);
#endif

	return msg;
}

void intel_device_info_print_static(const struct intel_device_info *info,
				    struct drm_printer *p)
{
	drm_printf(p, "engines: %x\n", info->engine_mask);
	drm_printf(p, "gen: %d\n", info->gen);
	drm_printf(p, "gt: %d\n", info->gt);
	drm_printf(p, "iommu: %s\n", iommu_name());
	drm_printf(p, "memory-regions: %x\n", info->memory_regions);
	drm_printf(p, "page-sizes: %x\n", info->page_sizes);
	drm_printf(p, "platform: %s\n", intel_platform_name(info->platform));
	drm_printf(p, "ppgtt-size: %d\n", info->ppgtt_size);
	drm_printf(p, "ppgtt-type: %d\n", info->ppgtt_type);

#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG

#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->display.name));
	DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
}

static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
{
	int s;

	drm_printf(p, "slice total: %u, mask=%04x\n",
		   hweight8(sseu->slice_mask), sseu->slice_mask);
	drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu));
	for (s = 0; s < sseu->max_slices; s++) {
		drm_printf(p, "slice%d: %u subslices, mask=%08x\n",
			   s, intel_sseu_subslices_per_slice(sseu, s),
			   intel_sseu_get_subslices(sseu, s));
	}
	drm_printf(p, "EU total: %u\n", sseu->eu_total);
	drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
	drm_printf(p, "has slice power gating: %s\n",
		   yesno(sseu->has_slice_pg));
	drm_printf(p, "has subslice power gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
}

void intel_device_info_print_runtime(const struct intel_runtime_info *info,
				     struct drm_printer *p)
{
	sseu_dump(&info->sseu, p);

	drm_printf(p, "CS timestamp frequency: %u kHz\n",
		   info->cs_timestamp_frequency_khz);
}

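/*
 * The EU mask is stored as a flat byte array: eu_stride bytes per
 * subslice, max_subslices * eu_stride bytes per slice. As a worked
 * example with hypothetical geometry (max_subslices = 4, eu_stride = 1),
 * the bits for slice 1, subslice 2 start at byte 1 * 4 + 2 = 6.
 */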
static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice,
		       int subslice)
{
	int slice_stride = sseu->max_subslices * sseu->eu_stride;

	return slice * slice_stride + subslice * sseu->eu_stride;
}

static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
			int subslice)
{
	int i, offset = sseu_eu_idx(sseu, slice, subslice);
	u16 eu_mask = 0;

	for (i = 0; i < sseu->eu_stride; i++) {
		eu_mask |= ((u16)sseu->eu_mask[offset + i]) <<
			(i * BITS_PER_BYTE);
	}

	return eu_mask;
}

static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
			 u16 eu_mask)
{
	int i, offset = sseu_eu_idx(sseu, slice, subslice);

	for (i = 0; i < sseu->eu_stride; i++) {
		sseu->eu_mask[offset + i] =
			(eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
	}
}

void intel_device_info_print_topology(const struct sseu_dev_info *sseu,
				      struct drm_printer *p)
{
	int s, ss;

	if (sseu->max_slices == 0) {
		drm_printf(p, "Unavailable\n");
		return;
	}

	for (s = 0; s < sseu->max_slices; s++) {
		drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n",
			   s, intel_sseu_subslices_per_slice(sseu, s),
			   intel_sseu_get_subslices(sseu, s));

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			u16 enabled_eus = sseu_get_eus(sseu, s, ss);

			drm_printf(p, "\tsubslice%d: %u EUs (0x%hx)\n",
				   ss, hweight16(enabled_eus), enabled_eus);
		}
	}
}

static u16 compute_eu_total(const struct sseu_dev_info *sseu)
{
	u16 i, total = 0;

	for (i = 0; i < ARRAY_SIZE(sseu->eu_mask); i++)
		total += hweight8(sseu->eu_mask[i]);

	return total;
}

static void gen11_compute_sseu_info(struct sseu_dev_info *sseu,
				    u8 s_en, u32 ss_en, u16 eu_en)
{
	int s, ss;

	/* ss_en represents entire subslice mask across all slices */
	GEM_BUG_ON(sseu->max_slices * sseu->max_subslices >
		   sizeof(ss_en) * BITS_PER_BYTE);

	for (s = 0; s < sseu->max_slices; s++) {
		if ((s_en & BIT(s)) == 0)
			continue;

		sseu->slice_mask |= BIT(s);

		intel_sseu_set_subslices(sseu, s, ss_en);

		for (ss = 0; ss < sseu->max_subslices; ss++)
			if (intel_sseu_has_subslice(sseu, s, ss))
				sseu_set_eus(sseu, s, ss, eu_en);
	}
	sseu->eu_per_subslice = hweight16(eu_en);
	sseu->eu_total = compute_eu_total(sseu);
}

static void gen12_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u8 s_en;
	u32 dss_en;
	u16 eu_en = 0;
	u8 eu_en_fuse;
	int eu;

	/*
	 * Gen12 has Dual-Subslices, which behave similarly to 2 gen11 SS.
	 * Instead of splitting these, provide userspace with an array
	 * of DSS to more closely represent the hardware resource.
	 */
	intel_sseu_set_info(sseu, 1, 6, 16);

	s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;

	dss_en = I915_READ(GEN12_GT_DSS_ENABLE);

	/* one bit per pair of EUs */
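	/*
	 * E.g. a (hypothetical) fuse value of 0b0111 expands to
	 * eu_en = 0b00111111: three enabled pairs become six EUs.
	 */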
	eu_en_fuse = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK);
	for (eu = 0; eu < sseu->max_eus_per_subslice / 2; eu++)
		if (eu_en_fuse & BIT(eu))
			eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1);

	gen11_compute_sseu_info(sseu, s_en, dss_en, eu_en);

	/* TGL only supports slice-level power gating */
	sseu->has_slice_pg = 1;
}

static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u8 s_en;
	u32 ss_en;
	u8 eu_en;

	if (IS_ELKHARTLAKE(dev_priv))
		intel_sseu_set_info(sseu, 1, 4, 8);
	else
		intel_sseu_set_info(sseu, 1, 8, 8);

	s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
	ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE);
	eu_en = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK);

	gen11_compute_sseu_info(sseu, s_en, ss_en, eu_en);

	/* ICL has no power gating restrictions. */
	sseu->has_slice_pg = 1;
	sseu->has_subslice_pg = 1;
	sseu->has_eu_pg = 1;
}

static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	const u32 fuse2 = I915_READ(GEN8_FUSE2);
	int s, ss;
	const int eu_mask = 0xff;
	u32 subslice_mask, eu_en;

	intel_sseu_set_info(sseu, 6, 4, 8);

	sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >>
			    GEN10_F2_S_ENA_SHIFT;

	/* Slice0 */
	eu_en = ~I915_READ(GEN8_EU_DISABLE0);
	for (ss = 0; ss < sseu->max_subslices; ss++)
		sseu_set_eus(sseu, 0, ss, (eu_en >> (8 * ss)) & eu_mask);
	/* Slice1 */
	sseu_set_eus(sseu, 1, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN8_EU_DISABLE1);
	sseu_set_eus(sseu, 1, 1, eu_en & eu_mask);
	/* Slice2 */
	sseu_set_eus(sseu, 2, 0, (eu_en >> 8) & eu_mask);
	sseu_set_eus(sseu, 2, 1, (eu_en >> 16) & eu_mask);
	/* Slice3 */
	sseu_set_eus(sseu, 3, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN8_EU_DISABLE2);
	sseu_set_eus(sseu, 3, 1, eu_en & eu_mask);
	/* Slice4 */
	sseu_set_eus(sseu, 4, 0, (eu_en >> 8) & eu_mask);
	sseu_set_eus(sseu, 4, 1, (eu_en >> 16) & eu_mask);
	/* Slice5 */
	sseu_set_eus(sseu, 5, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN10_EU_DISABLE3);
	sseu_set_eus(sseu, 5, 1, eu_en & eu_mask);

	subslice_mask = (1 << 4) - 1;
	subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
			   GEN10_F2_SS_DIS_SHIFT);

	for (s = 0; s < sseu->max_slices; s++) {
		u32 subslice_mask_with_eus = subslice_mask;

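		/* Drop subslices whose EU mask came back empty. */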
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			if (sseu_get_eus(sseu, s, ss) == 0)
				subslice_mask_with_eus &= ~BIT(ss);
		}

		/*
		 * Slice0 can have up to 3 subslices, but there are only 2 in
		 * slice1/2.
		 */
		intel_sseu_set_subslices(sseu, s, s == 0 ?
						  subslice_mask_with_eus :
						  subslice_mask_with_eus & 0x3);
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * CNL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     intel_sseu_subslice_total(sseu)) :
				0;

	/* No restrictions on Power Gating */
	sseu->has_slice_pg = 1;
	sseu->has_subslice_pg = 1;
	sseu->has_eu_pg = 1;
}

static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u32 fuse;
	u8 subslice_mask = 0;

	fuse = I915_READ(CHV_FUSE_GT);

	sseu->slice_mask = BIT(0);
	intel_sseu_set_info(sseu, 1, 2, 8);

	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
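		/*
		 * The R0/R1 fuse fields each hold 4 bits of the 8-bit
		 * per-subslice EU disable mask; stitch them back together.
		 */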
		u8 disabled_mask =
			((fuse & CHV_FGT_EU_DIS_SS0_R0_MASK) >>
			 CHV_FGT_EU_DIS_SS0_R0_SHIFT) |
			(((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
			  CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);

		subslice_mask |= BIT(0);
		sseu_set_eus(sseu, 0, 0, ~disabled_mask);
	}

	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
		u8 disabled_mask =
			((fuse & CHV_FGT_EU_DIS_SS1_R0_MASK) >>
			 CHV_FGT_EU_DIS_SS1_R0_SHIFT) |
			(((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
			  CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);

		subslice_mask |= BIT(1);
		sseu_set_eus(sseu, 0, 1, ~disabled_mask);
	}

	intel_sseu_set_subslices(sseu, 0, subslice_mask);

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * CHV is expected to always have a uniform distribution of EU
	 * across subslices.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				sseu->eu_total /
					intel_sseu_subslice_total(sseu) :
				0;
	/*
	 * CHV supports subslice power gating on devices with more than
	 * one subslice, and supports EU power gating on devices with
	 * more than one EU pair per subslice.
	 */
	sseu->has_slice_pg = 0;
	sseu->has_subslice_pg = intel_sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
}

static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	int s, ss;
	u32 fuse2, eu_disable, subslice_mask;
	const u8 eu_mask = 0xff;

	fuse2 = I915_READ(GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;

	/* BXT has a single slice and at most 3 subslices. */
	intel_sseu_set_info(sseu, IS_GEN9_LP(dev_priv) ? 1 : 3,
			    IS_GEN9_LP(dev_priv) ? 3 : 4, 8);

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	subslice_mask = (1 << sseu->max_subslices) - 1;
	subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
			   GEN9_F2_SS_DIS_SHIFT);

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EUs.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		intel_sseu_set_subslices(sseu, s, subslice_mask);

		eu_disable = I915_READ(GEN9_EU_DISABLE(s));
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			int eu_per_ss;
			u8 eu_disabled_mask;

			if (!intel_sseu_has_subslice(sseu, s, ss))
				/* skip disabled subslice */
				continue;

			eu_disabled_mask = (eu_disable >> (ss * 8)) & eu_mask;

			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);

			eu_per_ss = sseu->max_eus_per_subslice -
				hweight8(eu_disabled_mask);

			/*
			 * Record which subslice(s) has(have) 7 EUs. We
			 * can tune the hash used to spread work among
			 * subslices if they are unbalanced.
			 */
			if (eu_per_ss == 7)
				sseu->subslice_7eu[s] |= BIT(ss);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * SKL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery. BXT is expected to be perfectly uniform in EU
	 * distribution.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     intel_sseu_subslice_total(sseu)) :
				0;
	/*
	 * SKL+ supports slice power gating on devices with more than
	 * one slice, and supports EU power gating on devices with
	 * more than one EU pair per subslice. BXT+ supports subslice
	 * power gating on devices with more than one subslice, and
	 * supports EU power gating on devices with more than one EU
	 * pair per subslice.
	 */
	sseu->has_slice_pg =
		!IS_GEN9_LP(dev_priv) && hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg =
		IS_GEN9_LP(dev_priv) && intel_sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = sseu->eu_per_subslice > 2;

	if (IS_GEN9_LP(dev_priv)) {
#define IS_SS_DISABLED(ss)	(!(sseu->subslice_mask[0] & BIT(ss)))
		info->has_pooled_eu = hweight8(sseu->subslice_mask[0]) == 3;

		sseu->min_eu_in_pool = 0;
		if (info->has_pooled_eu) {
			if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
				sseu->min_eu_in_pool = 3;
			else if (IS_SS_DISABLED(1))
				sseu->min_eu_in_pool = 6;
			else
				sseu->min_eu_in_pool = 9;
		}
#undef IS_SS_DISABLED
	}
}

static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	int s, ss;
	u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */

	fuse2 = I915_READ(GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
	intel_sseu_set_info(sseu, 3, 3, 8);

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	subslice_mask = GENMASK(sseu->max_subslices - 1, 0);
	subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
			   GEN8_F2_SS_DIS_SHIFT);

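	/*
	 * The per-slice EU disable masks straddle the 32-bit EU_DISABLE
	 * registers: slice1 and slice2 are each stitched together from
	 * the high bits of one register and the low bits of the next.
	 */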
	eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
	eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
			 (32 - GEN8_EU_DIS0_S1_SHIFT));
	eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
			 (32 - GEN8_EU_DIS1_S2_SHIFT));

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		intel_sseu_set_subslices(sseu, s, subslice_mask);

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			u8 eu_disabled_mask;
			u32 n_disabled;

			if (!intel_sseu_has_subslice(sseu, s, ss))
				/* skip disabled subslice */
				continue;

			eu_disabled_mask =
				eu_disable[s] >> (ss * sseu->max_eus_per_subslice);

			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);

			n_disabled = hweight8(eu_disabled_mask);

			/*
			 * Record which subslices have 7 EUs.
			 */
			if (sseu->max_eus_per_subslice - n_disabled == 7)
				sseu->subslice_7eu[s] |= 1 << ss;
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * BDW is expected to always have a uniform distribution of EU across
	 * subslices with the exception that any one EU in any one subslice may
	 * be fused off for die recovery.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     intel_sseu_subslice_total(sseu)) :
				0;

	/*
	 * BDW supports slice power gating on devices with more than
	 * one slice.
	 */
	sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg = 0;
	sseu->has_eu_pg = 0;
}

static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u32 fuse1;
	u8 subslice_mask = 0;
	int s, ss;

	/*
	 * There isn't a register to tell us how many slices/subslices. We
	 * work off the PCI-ids here.
	 */
	switch (INTEL_INFO(dev_priv)->gt) {
	default:
		MISSING_CASE(INTEL_INFO(dev_priv)->gt);
		/* fall through */
	case 1:
		sseu->slice_mask = BIT(0);
		subslice_mask = BIT(0);
		break;
	case 2:
		sseu->slice_mask = BIT(0);
		subslice_mask = BIT(0) | BIT(1);
		break;
	case 3:
		sseu->slice_mask = BIT(0) | BIT(1);
		subslice_mask = BIT(0) | BIT(1);
		break;
	}

	fuse1 = I915_READ(HSW_PAVP_FUSE1);
	switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) {
	default:
		MISSING_CASE((fuse1 & HSW_F1_EU_DIS_MASK) >>
			     HSW_F1_EU_DIS_SHIFT);
		/* fall through */
	case HSW_F1_EU_DIS_10EUS:
		sseu->eu_per_subslice = 10;
		break;
	case HSW_F1_EU_DIS_8EUS:
		sseu->eu_per_subslice = 8;
		break;
	case HSW_F1_EU_DIS_6EUS:
		sseu->eu_per_subslice = 6;
		break;
	}

	intel_sseu_set_info(sseu, hweight8(sseu->slice_mask),
			    hweight8(subslice_mask),
			    sseu->eu_per_subslice);

	for (s = 0; s < sseu->max_slices; s++) {
		intel_sseu_set_subslices(sseu, s, subslice_mask);

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			sseu_set_eus(sseu, s, ss,
				     (1UL << sseu->eu_per_subslice) - 1);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/* No powergating for you. */
	sseu->has_slice_pg = 0;
	sseu->has_subslice_pg = 0;
	sseu->has_eu_pg = 0;
}

static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
{
	u32 ts_override = I915_READ(GEN9_TIMESTAMP_OVERRIDE);
	u32 base_freq, frac_freq;

	base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
	base_freq *= 1000;

	frac_freq = ((ts_override &
		      GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
	frac_freq = 1000 / (frac_freq + 1);
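	/*
	 * Worked example with hypothetical fuse values: a divider field
	 * of 23 gives base_freq = 24 * 1000 = 24000 kHz, and a
	 * denominator field of 3 adds frac_freq = 1000 / 4 = 250 kHz.
	 */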

	return base_freq + frac_freq;
}

static u32 gen10_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
					u32 rpm_config_reg)
{
	u32 f19_2_mhz = 19200;
	u32 f24_mhz = 24000;
	u32 crystal_clock = (rpm_config_reg &
			     GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
			    GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;

	switch (crystal_clock) {
	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
		return f19_2_mhz;
	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
		return f24_mhz;
	default:
		MISSING_CASE(crystal_clock);
		return 0;
	}
}

static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
					u32 rpm_config_reg)
{
	u32 f19_2_mhz = 19200;
	u32 f24_mhz = 24000;
	u32 f25_mhz = 25000;
	u32 f38_4_mhz = 38400;
	u32 crystal_clock = (rpm_config_reg &
			     GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
			    GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;

	switch (crystal_clock) {
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
		return f24_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
		return f19_2_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
		return f38_4_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
		return f25_mhz;
	default:
		MISSING_CASE(crystal_clock);
		return 0;
	}
}

static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
{
	u32 f12_5_mhz = 12500;
	u32 f19_2_mhz = 19200;
	u32 f24_mhz = 24000;

	if (INTEL_GEN(dev_priv) <= 4) {
		/* PRMs say:
		 *
		 *     "The value in this register increments once every 16
		 *      hclks." (through the “Clocking Configuration”
		 *      (“CLKCFG”) MCHBAR register)
		 */
		return dev_priv->rawclk_freq / 16;
	} else if (INTEL_GEN(dev_priv) <= 8) {
		/* PRMs say:
		 *
		 *     "The PCU TSC counts 10ns increments; this timestamp
		 *      reflects bits 38:3 of the TSC (i.e. 80ns granularity,
		 *      rolling over every 1.5 hours)."
		 */
		return f12_5_mhz;
	} else if (INTEL_GEN(dev_priv) <= 9) {
		u32 ctc_reg = I915_READ(CTC_MODE);
		u32 freq = 0;

		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
			freq = read_reference_ts_freq(dev_priv);
		} else {
			freq = IS_GEN9_LP(dev_priv) ? f19_2_mhz : f24_mhz;

			/* Now figure out how the command stream's timestamp
			 * register increments from this frequency (it might
			 * increment only every few clock cycles).
			 */
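			/*
			 * E.g. a (hypothetical) shift parameter of 0 divides
			 * the reference frequency by 2^3 = 8.
			 */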
			freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
				      CTC_SHIFT_PARAMETER_SHIFT);
		}

		return freq;
	} else if (INTEL_GEN(dev_priv) <= 12) {
		u32 ctc_reg = I915_READ(CTC_MODE);
		u32 freq = 0;

		/* First figure out the reference frequency. There are 2 ways
		 * we can compute the frequency, either through the
		 * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
		 * tells us which one we should use.
		 */
		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
			freq = read_reference_ts_freq(dev_priv);
		} else {
			u32 rpm_config_reg = I915_READ(RPM_CONFIG0);

			if (INTEL_GEN(dev_priv) <= 10)
				freq = gen10_get_crystal_clock_freq(dev_priv,
								rpm_config_reg);
			else
				freq = gen11_get_crystal_clock_freq(dev_priv,
								rpm_config_reg);

			/* Now figure out how the command stream's timestamp
			 * register increments from this frequency (it might
			 * increment only every few clock cycles).
			 */
			freq >>= 3 - ((rpm_config_reg &
				       GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
				      GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
		}

		return freq;
	}

	MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n");
	return 0;
}

#undef INTEL_VGA_DEVICE
#define INTEL_VGA_DEVICE(id, info) (id)

static const u16 subplatform_ult_ids[] = {
	INTEL_HSW_ULT_GT1_IDS(0),
	INTEL_HSW_ULT_GT2_IDS(0),
	INTEL_HSW_ULT_GT3_IDS(0),
	INTEL_BDW_ULT_GT1_IDS(0),
	INTEL_BDW_ULT_GT2_IDS(0),
	INTEL_BDW_ULT_GT3_IDS(0),
	INTEL_BDW_ULT_RSVD_IDS(0),
	INTEL_SKL_ULT_GT1_IDS(0),
	INTEL_SKL_ULT_GT2_IDS(0),
	INTEL_SKL_ULT_GT3_IDS(0),
	INTEL_KBL_ULT_GT1_IDS(0),
	INTEL_KBL_ULT_GT2_IDS(0),
	INTEL_KBL_ULT_GT3_IDS(0),
	INTEL_CFL_U_GT2_IDS(0),
	INTEL_CFL_U_GT3_IDS(0),
	INTEL_WHL_U_GT1_IDS(0),
	INTEL_WHL_U_GT2_IDS(0),
	INTEL_WHL_U_GT3_IDS(0),
	INTEL_CML_U_GT1_IDS(0),
	INTEL_CML_U_GT2_IDS(0),
};

static const u16 subplatform_ulx_ids[] = {
	INTEL_HSW_ULX_GT1_IDS(0),
	INTEL_HSW_ULX_GT2_IDS(0),
	INTEL_BDW_ULX_GT1_IDS(0),
	INTEL_BDW_ULX_GT2_IDS(0),
	INTEL_BDW_ULX_GT3_IDS(0),
	INTEL_BDW_ULX_RSVD_IDS(0),
	INTEL_SKL_ULX_GT1_IDS(0),
	INTEL_SKL_ULX_GT2_IDS(0),
	INTEL_KBL_ULX_GT1_IDS(0),
	INTEL_KBL_ULX_GT2_IDS(0),
	INTEL_AML_KBL_GT2_IDS(0),
	INTEL_AML_CFL_GT2_IDS(0),
};

static const u16 subplatform_portf_ids[] = {
	INTEL_CNL_PORT_F_IDS(0),
	INTEL_ICL_PORT_F_IDS(0),
};

static bool find_devid(u16 id, const u16 *p, unsigned int num)
{
	for (; num; num--, p++) {
		if (*p == id)
			return true;
	}

	return false;
}

void intel_device_info_subplatform_init(struct drm_i915_private *i915)
{
	const struct intel_device_info *info = INTEL_INFO(i915);
	const struct intel_runtime_info *rinfo = RUNTIME_INFO(i915);
	const unsigned int pi = __platform_mask_index(rinfo, info->platform);
	const unsigned int pb = __platform_mask_bit(rinfo, info->platform);
	u16 devid = INTEL_DEVID(i915);
	u32 mask = 0;

	/* Make sure IS_<platform> checks are working. */
	RUNTIME_INFO(i915)->platform_mask[pi] = BIT(pb);

	/* Find and mark subplatform bits based on the PCI device id. */
	if (find_devid(devid, subplatform_ult_ids,
		       ARRAY_SIZE(subplatform_ult_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_ULT);
	} else if (find_devid(devid, subplatform_ulx_ids,
			      ARRAY_SIZE(subplatform_ulx_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_ULX);
		if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
			/* ULX machines are also considered ULT. */
			mask |= BIT(INTEL_SUBPLATFORM_ULT);
		}
	} else if (find_devid(devid, subplatform_portf_ids,
			      ARRAY_SIZE(subplatform_portf_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_PORTF);
	}

	GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_BITS);

	RUNTIME_INFO(i915)->platform_mask[pi] |= mask;
}

/**
 * intel_device_info_runtime_init - initialize runtime info
 * @dev_priv: the i915 device
 *
 * Determine various intel_device_info fields at runtime.
 *
 * Use it when either:
 *   - it's judged too laborious to fill n static structures with the limit
 *     when a simple if statement does the job,
 *   - run-time checks (e.g. reading fuse/strap registers) are needed.
 *
 * This function needs to be called:
 *   - after the MMIO has been setup as we are reading registers,
 *   - after the PCH has been detected,
 *   - before the first usage of the fields it can tweak.
 */
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv);
	enum pipe pipe;

	if (INTEL_GEN(dev_priv) >= 10) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_scalers[pipe] = 2;
	} else if (IS_GEN(dev_priv, 9)) {
		runtime->num_scalers[PIPE_A] = 2;
		runtime->num_scalers[PIPE_B] = 2;
		runtime->num_scalers[PIPE_C] = 1;
	}

	BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES);

	if (INTEL_GEN(dev_priv) >= 11)
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 6;
	else if (IS_GEN(dev_priv, 10) || IS_GEMINILAKE(dev_priv))
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 3;
	else if (IS_BROXTON(dev_priv)) {
		/*
		 * Skylake and Broxton currently don't expose the topmost plane as its
		 * use is exclusive with the legacy cursor and we only want to expose
		 * one of those, not both. Until we can safely expose the topmost plane
		 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
		 * we don't expose the topmost plane at all to prevent ABI breakage
		 * down the line.
		 */

		runtime->num_sprites[PIPE_A] = 2;
		runtime->num_sprites[PIPE_B] = 2;
		runtime->num_sprites[PIPE_C] = 1;
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 2;
	} else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 1;
	}

	if (HAS_DISPLAY(dev_priv) && IS_GEN_RANGE(dev_priv, 7, 8) &&
	    HAS_PCH_SPLIT(dev_priv)) {
		u32 fuse_strap = I915_READ(FUSE_STRAP);
		u32 sfuse_strap = I915_READ(SFUSE_STRAP);

		/*
		 * SFUSE_STRAP is supposed to have a bit signalling the display
		 * is fused off. Unfortunately it seems that, at least in
		 * certain cases, fused off display means that PCH display
		 * reads don't land anywhere. In that case, we read 0s.
		 *
		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
		 * should be set when taking over after the firmware.
		 */
		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
		    (HAS_PCH_CPT(dev_priv) &&
		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
			DRM_INFO("Display fused off, disabling\n");
			info->pipe_mask = 0;
		} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
			DRM_INFO("PipeC fused off\n");
			info->pipe_mask &= ~BIT(PIPE_C);
		}
	} else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
		u32 dfsm = I915_READ(SKL_DFSM);
		u8 enabled_mask = info->pipe_mask;

		if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
			enabled_mask &= ~BIT(PIPE_A);
		if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
			enabled_mask &= ~BIT(PIPE_B);
		if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
			enabled_mask &= ~BIT(PIPE_C);
		if (INTEL_GEN(dev_priv) >= 12 &&
		    (dfsm & TGL_DFSM_PIPE_D_DISABLE))
			enabled_mask &= ~BIT(PIPE_D);

		/*
		 * At least one pipe should be enabled and if there are
		 * disabled pipes, they should be the last ones, with no holes
		 * in the mask.
		 */
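		/*
		 * enabled_mask + 1 is a power of two only for masks of the
		 * form 0b0...01...1: e.g. 0x7 (pipes A-C) passes, while 0x5
		 * (pipe B fused off but pipe C present) is rejected.
		 */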
		if (enabled_mask == 0 || !is_power_of_2(enabled_mask + 1))
			DRM_ERROR("invalid pipe fuse configuration: enabled_mask=0x%x\n",
				  enabled_mask);
		else
			info->pipe_mask = enabled_mask;

		if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE)
			info->display.has_hdcp = 0;

		if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE)
			info->display.has_fbc = 0;

		if (INTEL_GEN(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
			info->display.has_csr = 0;

		if (INTEL_GEN(dev_priv) >= 10 &&
		    (dfsm & CNL_DFSM_DISPLAY_DSC_DISABLE))
			info->display.has_dsc = 0;
	}

	/* Initialize slice/subslice/EU info */
	if (IS_HASWELL(dev_priv))
		haswell_sseu_info_init(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		cherryview_sseu_info_init(dev_priv);
	else if (IS_BROADWELL(dev_priv))
		broadwell_sseu_info_init(dev_priv);
	else if (IS_GEN(dev_priv, 9))
		gen9_sseu_info_init(dev_priv);
	else if (IS_GEN(dev_priv, 10))
		gen10_sseu_info_init(dev_priv);
	else if (IS_GEN(dev_priv, 11))
		gen11_sseu_info_init(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 12)
		gen12_sseu_info_init(dev_priv);

	if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
		DRM_INFO("Disabling ppGTT for VT-d support\n");
		info->ppgtt_type = INTEL_PPGTT_NONE;
	}

	/* Initialize command stream timestamp frequency */
	runtime->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
}

void intel_driver_caps_print(const struct intel_driver_caps *caps,
			     struct drm_printer *p)
{
	drm_printf(p, "Has logical contexts? %s\n",
		   yesno(caps->has_logical_contexts));
	drm_printf(p, "scheduler: %x\n", caps->scheduler);
}

/*
 * Determine which engines are fused off in our particular hardware. Since the
 * fuse register is in the blitter powerwell, we need forcewake to be ready at
 * this point (but later we need to prune the forcewake domains for engines that
 * are indeed fused off).
 */
void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	unsigned int logical_vdbox = 0;
	unsigned int i;
	u32 media_fuse;
	u16 vdbox_mask;
	u16 vebox_mask;

	if (INTEL_GEN(dev_priv) < 11)
		return;

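	/* The fuse register encodes disabled engines; invert it for an enable mask. */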
	media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);

	vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
	vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
		      GEN11_GT_VEBOX_DISABLE_SHIFT;

	for (i = 0; i < I915_MAX_VCS; i++) {
		if (!HAS_ENGINE(dev_priv, _VCS(i))) {
			vdbox_mask &= ~BIT(i);
			continue;
		}

		if (!(BIT(i) & vdbox_mask)) {
			info->engine_mask &= ~BIT(_VCS(i));
			DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
			continue;
		}

		/*
		 * In Gen11, only even numbered logical VDBOXes are
		 * hooked up to an SFC (Scaler & Format Converter) unit.
		 * In TGL each VDBOX has access to an SFC.
		 */
		if (IS_TIGERLAKE(dev_priv) || logical_vdbox++ % 2 == 0)
			RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
	}
	DRM_DEBUG_DRIVER("vdbox enable: %04x, instances: %04lx\n",
			 vdbox_mask, VDBOX_MASK(dev_priv));
	GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv));

	for (i = 0; i < I915_MAX_VECS; i++) {
		if (!HAS_ENGINE(dev_priv, _VECS(i))) {
			vebox_mask &= ~BIT(i);
			continue;
		}

		if (!(BIT(i) & vebox_mask)) {
			info->engine_mask &= ~BIT(_VECS(i));
			DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
		}
	}
	DRM_DEBUG_DRIVER("vebox enable: %04x, instances: %04lx\n",
			 vebox_mask, VEBOX_MASK(dev_priv));
	GEM_BUG_ON(vebox_mask != VEBOX_MASK(dev_priv));
}