/* intel_device_info.c */
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

25
#include <drm/drm_print.h>
26
#include <drm/i915_pciids.h>
27

28
#include "display/intel_cdclk.h"
29
#include "intel_device_info.h"
30 31
#include "i915_drv.h"

32 33 34 35 36 37 38 39 40 41 42 43
/*
 * Table mapping enum intel_platform values to their human-readable names,
 * indexed by the enum value itself (sparse entries stay NULL).
 */
#define PLATFORM_NAME(x) [INTEL_##x] = #x
static const char * const platform_names[] = {
	PLATFORM_NAME(I830),
	PLATFORM_NAME(I845G),
	PLATFORM_NAME(I85X),
	PLATFORM_NAME(I865G),
	PLATFORM_NAME(I915G),
	PLATFORM_NAME(I915GM),
	PLATFORM_NAME(I945G),
	PLATFORM_NAME(I945GM),
	PLATFORM_NAME(G33),
	PLATFORM_NAME(PINEVIEW),
	PLATFORM_NAME(I965G),
	PLATFORM_NAME(I965GM),
	PLATFORM_NAME(G45),
	PLATFORM_NAME(GM45),
	PLATFORM_NAME(IRONLAKE),
	PLATFORM_NAME(SANDYBRIDGE),
	PLATFORM_NAME(IVYBRIDGE),
	PLATFORM_NAME(VALLEYVIEW),
	PLATFORM_NAME(HASWELL),
	PLATFORM_NAME(BROADWELL),
	PLATFORM_NAME(CHERRYVIEW),
	PLATFORM_NAME(SKYLAKE),
	PLATFORM_NAME(BROXTON),
	PLATFORM_NAME(KABYLAKE),
	PLATFORM_NAME(GEMINILAKE),
	PLATFORM_NAME(COFFEELAKE),
	PLATFORM_NAME(COMETLAKE),
	PLATFORM_NAME(CANNONLAKE),
	PLATFORM_NAME(ICELAKE),
	PLATFORM_NAME(ELKHARTLAKE),
	PLATFORM_NAME(TIGERLAKE),
	PLATFORM_NAME(ROCKETLAKE),
};
#undef PLATFORM_NAME

const char *intel_platform_name(enum intel_platform platform)
{
71 72
	BUILD_BUG_ON(ARRAY_SIZE(platform_names) != INTEL_MAX_PLATFORMS);

73 74 75 76 77 78 79
	if (WARN_ON_ONCE(platform >= ARRAY_SIZE(platform_names) ||
			 platform_names[platform] == NULL))
		return "<unknown>";

	return platform_names[platform];
}

/*
 * Report the graphics IOMMU state as a string: "enabled"/"disabled" when
 * the kernel is built with CONFIG_INTEL_IOMMU, "n/a" otherwise.
 */
static const char *iommu_name(void)
{
#ifdef CONFIG_INTEL_IOMMU
	return enableddisabled(intel_iommu_gfx_mapped);
#else
	return "n/a";
#endif
}

/* Dump the compile-time (static) device info fields to a drm_printer. */
void intel_device_info_print_static(const struct intel_device_info *info,
				    struct drm_printer *p)
{
	drm_printf(p, "engines: %x\n", info->engine_mask);
	drm_printf(p, "gen: %d\n", info->gen);
	drm_printf(p, "gt: %d\n", info->gt);
	drm_printf(p, "iommu: %s\n", iommu_name());
	drm_printf(p, "memory-regions: %x\n", info->memory_regions);
	drm_printf(p, "page-sizes: %x\n", info->page_sizes);
	drm_printf(p, "platform: %s\n", intel_platform_name(info->platform));
	drm_printf(p, "ppgtt-size: %d\n", info->ppgtt_size);
	drm_printf(p, "ppgtt-type: %d\n", info->ppgtt_type);
	drm_printf(p, "dma_mask_size: %u\n", info->dma_mask_size);

	/* One "flag: yes/no" line per device-info feature flag. */
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG

	/* Likewise for the display-specific feature flags. */
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->display.name));
	DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
}

/* Print a summary of the slice/subslice/EU topology and power-gating caps. */
static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
{
	int s;

	drm_printf(p, "slice total: %u, mask=%04x\n",
		   hweight8(sseu->slice_mask), sseu->slice_mask);
	drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu));
	for (s = 0; s < sseu->max_slices; s++) {
		drm_printf(p, "slice%d: %u subslices, mask=%08x\n",
			   s, intel_sseu_subslices_per_slice(sseu, s),
			   intel_sseu_get_subslices(sseu, s));
	}
	drm_printf(p, "EU total: %u\n", sseu->eu_total);
	drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
	drm_printf(p, "has slice power gating: %s\n",
		   yesno(sseu->has_slice_pg));
	drm_printf(p, "has subslice power gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
}

/* Dump the probe-time (runtime) device info fields to a drm_printer. */
void intel_device_info_print_runtime(const struct intel_runtime_info *info,
				     struct drm_printer *p)
{
	sseu_dump(&info->sseu, p);

	drm_printf(p, "rawclk rate: %u kHz\n", info->rawclk_freq);
	drm_printf(p, "CS timestamp frequency: %u Hz\n",
		   info->cs_timestamp_frequency_hz);
}

145 146 147
static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice,
		       int subslice)
{
148
	int slice_stride = sseu->max_subslices * sseu->eu_stride;
149

150
	return slice * slice_stride + subslice * sseu->eu_stride;
151 152 153 154 155 156 157 158
}

static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
			int subslice)
{
	int i, offset = sseu_eu_idx(sseu, slice, subslice);
	u16 eu_mask = 0;

159
	for (i = 0; i < sseu->eu_stride; i++) {
160 161 162 163 164 165 166 167 168 169 170 171
		eu_mask |= ((u16)sseu->eu_mask[offset + i]) <<
			(i * BITS_PER_BYTE);
	}

	return eu_mask;
}

/*
 * Store the 16-bit EU enable mask for (slice, subslice) into sseu->eu_mask
 * as eu_stride consecutive bytes, least-significant byte first.
 */
static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
			 u16 eu_mask)
{
	int offset = sseu_eu_idx(sseu, slice, subslice);
	int i;

	for (i = 0; i < sseu->eu_stride; i++, eu_mask >>= BITS_PER_BYTE)
		sseu->eu_mask[offset + i] = eu_mask & 0xff;
}

/* Print the full slice/subslice/EU topology, one line per subslice. */
void intel_device_info_print_topology(const struct sseu_dev_info *sseu,
				      struct drm_printer *p)
{
	int s, ss;

	/* max_slices == 0 means the topology was never probed. */
	if (sseu->max_slices == 0) {
		drm_printf(p, "Unavailable\n");
		return;
	}

	for (s = 0; s < sseu->max_slices; s++) {
		drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n",
			   s, intel_sseu_subslices_per_slice(sseu, s),
			   intel_sseu_get_subslices(sseu, s));

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			u16 enabled_eus = sseu_get_eus(sseu, s, ss);

			drm_printf(p, "\tsubslice%d: %u EUs (0x%hx)\n",
				   ss, hweight16(enabled_eus), enabled_eus);
		}
	}
}

202 203 204 205 206 207 208 209 210 211
static u16 compute_eu_total(const struct sseu_dev_info *sseu)
{
	u16 i, total = 0;

	for (i = 0; i < ARRAY_SIZE(sseu->eu_mask); i++)
		total += hweight8(sseu->eu_mask[i]);

	return total;
}

/*
 * Populate the SSEU topology from fused enable bits: @s_en is the slice
 * enable mask, @ss_en the subslice enable mask (applied to every enabled
 * slice), and @eu_en the EU enable mask (applied to every enabled subslice).
 */
static void gen11_compute_sseu_info(struct sseu_dev_info *sseu,
				    u8 s_en, u32 ss_en, u16 eu_en)
{
	int s, ss;

	/* ss_en represents entire subslice mask across all slices */
	GEM_BUG_ON(sseu->max_slices * sseu->max_subslices >
		   sizeof(ss_en) * BITS_PER_BYTE);

	for (s = 0; s < sseu->max_slices; s++) {
		if ((s_en & BIT(s)) == 0)
			continue;

		sseu->slice_mask |= BIT(s);

		intel_sseu_set_subslices(sseu, s, ss_en);

		for (ss = 0; ss < sseu->max_subslices; ss++)
			if (intel_sseu_has_subslice(sseu, s, ss))
				sseu_set_eus(sseu, s, ss, eu_en);
	}
	sseu->eu_per_subslice = hweight16(eu_en);
	sseu->eu_total = compute_eu_total(sseu);
}

/* Read gen12 (TGL) fuse registers and populate the SSEU topology. */
static void gen12_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u8 s_en;
	u32 dss_en;
	u16 eu_en = 0;
	u8 eu_en_fuse;
	int eu;

	/*
	 * Gen12 has Dual-Subslices, which behave similarly to 2 gen11 SS.
	 * Instead of splitting these, provide userspace with an array
	 * of DSS to more closely represent the hardware resource.
	 */
	intel_sseu_set_info(sseu, 1, 6, 16);

	s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;

	dss_en = I915_READ(GEN12_GT_DSS_ENABLE);

	/* one bit per pair of EUs */
	eu_en_fuse = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK);
	/* Expand each fuse bit into two adjacent EU enable bits. */
	for (eu = 0; eu < sseu->max_eus_per_subslice / 2; eu++)
		if (eu_en_fuse & BIT(eu))
			eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1);

	gen11_compute_sseu_info(sseu, s_en, dss_en, eu_en);

	/* TGL only supports slice-level power gating */
	sseu->has_slice_pg = 1;
}

/* Read gen11 (ICL/EHL) fuse registers and populate the SSEU topology. */
static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u8 s_en;
	u32 ss_en;
	u8 eu_en;

	/* EHL has fewer subslices than ICL (4 vs 8). */
	if (IS_ELKHARTLAKE(dev_priv))
		intel_sseu_set_info(sseu, 1, 4, 8);
	else
		intel_sseu_set_info(sseu, 1, 8, 8);

	/* Subslice/EU fuses are disable masks; invert to get enable bits. */
	s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
	ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE);
	eu_en = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK);

	gen11_compute_sseu_info(sseu, s_en, ss_en, eu_en);

	/* ICL has no power gating restrictions. */
	sseu->has_slice_pg = 1;
	sseu->has_subslice_pg = 1;
	sseu->has_eu_pg = 1;
}

/*
 * Read gen10 (CNL) fuse registers and populate the SSEU topology.
 * The EU disable bits are spread across four registers, 8 bits per
 * subslice, packed back-to-back across slice boundaries.
 */
static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	const u32 fuse2 = I915_READ(GEN8_FUSE2);
	int s, ss;
	const int eu_mask = 0xff;
	u32 subslice_mask, eu_en;

	intel_sseu_set_info(sseu, 6, 4, 8);

	sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >>
			    GEN10_F2_S_ENA_SHIFT;

	/* Slice0 */
	eu_en = ~I915_READ(GEN8_EU_DISABLE0);
	for (ss = 0; ss < sseu->max_subslices; ss++)
		sseu_set_eus(sseu, 0, ss, (eu_en >> (8 * ss)) & eu_mask);
	/* Slice1 */
	sseu_set_eus(sseu, 1, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN8_EU_DISABLE1);
	sseu_set_eus(sseu, 1, 1, eu_en & eu_mask);
	/* Slice2 */
	sseu_set_eus(sseu, 2, 0, (eu_en >> 8) & eu_mask);
	sseu_set_eus(sseu, 2, 1, (eu_en >> 16) & eu_mask);
	/* Slice3 */
	sseu_set_eus(sseu, 3, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN8_EU_DISABLE2);
	sseu_set_eus(sseu, 3, 1, eu_en & eu_mask);
	/* Slice4 */
	sseu_set_eus(sseu, 4, 0, (eu_en >> 8) & eu_mask);
	sseu_set_eus(sseu, 4, 1, (eu_en >> 16) & eu_mask);
	/* Slice5 */
	sseu_set_eus(sseu, 5, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN10_EU_DISABLE3);
	sseu_set_eus(sseu, 5, 1, eu_en & eu_mask);

	/* The subslice disable fuse is shared by all slices. */
	subslice_mask = (1 << 4) - 1;
	subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
			   GEN10_F2_SS_DIS_SHIFT);

	for (s = 0; s < sseu->max_slices; s++) {
		u32 subslice_mask_with_eus = subslice_mask;

		/* Drop subslices whose EUs are all fused off. */
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			if (sseu_get_eus(sseu, s, ss) == 0)
				subslice_mask_with_eus &= ~BIT(ss);
		}

		/*
		 * Slice0 can have up to 3 subslices, but there are only 2 in
		 * slice1/2.
		 */
		intel_sseu_set_subslices(sseu, s, s == 0 ?
						  subslice_mask_with_eus :
						  subslice_mask_with_eus & 0x3);
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * CNL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     intel_sseu_subslice_total(sseu)) :
				0;

	/* No restrictions on Power Gating */
	sseu->has_slice_pg = 1;
	sseu->has_subslice_pg = 1;
	sseu->has_eu_pg = 1;
}

/*
 * Read the CHV fuse register and populate the SSEU topology: one slice,
 * up to two subslices, 8 EUs each (EU disable bits split across two
 * 4-bit fields per subslice).
 */
static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u32 fuse;
	u8 subslice_mask = 0;

	fuse = I915_READ(CHV_FUSE_GT);

	sseu->slice_mask = BIT(0);
	intel_sseu_set_info(sseu, 1, 2, 8);

	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
		u8 disabled_mask =
			((fuse & CHV_FGT_EU_DIS_SS0_R0_MASK) >>
			 CHV_FGT_EU_DIS_SS0_R0_SHIFT) |
			(((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
			  CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);

		subslice_mask |= BIT(0);
		sseu_set_eus(sseu, 0, 0, ~disabled_mask);
	}

	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
		u8 disabled_mask =
			((fuse & CHV_FGT_EU_DIS_SS1_R0_MASK) >>
			 CHV_FGT_EU_DIS_SS1_R0_SHIFT) |
			(((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
			  CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);

		subslice_mask |= BIT(1);
		sseu_set_eus(sseu, 0, 1, ~disabled_mask);
	}

	intel_sseu_set_subslices(sseu, 0, subslice_mask);

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * CHV expected to always have a uniform distribution of EU
	 * across subslices.
	*/
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				sseu->eu_total /
					intel_sseu_subslice_total(sseu) :
				0;
	/*
	 * CHV supports subslice power gating on devices with more than
	 * one subslice, and supports EU power gating on devices with
	 * more than one EU pair per subslice.
	*/
	sseu->has_slice_pg = 0;
	sseu->has_subslice_pg = intel_sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
}

/*
 * Read gen9 (SKL/KBL/BXT/GLK) fuse registers and populate the SSEU
 * topology, including the BXT pooled-EU configuration.
 */
static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	int s, ss;
	u32 fuse2, eu_disable, subslice_mask;
	const u8 eu_mask = 0xff;

	fuse2 = I915_READ(GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;

	/* BXT has a single slice and at most 3 subslices. */
	intel_sseu_set_info(sseu, IS_GEN9_LP(dev_priv) ? 1 : 3,
			    IS_GEN9_LP(dev_priv) ? 3 : 4, 8);

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	*/
	subslice_mask = (1 << sseu->max_subslices) - 1;
	subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
			   GEN9_F2_SS_DIS_SHIFT);

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	*/
	for (s = 0; s < sseu->max_slices; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		intel_sseu_set_subslices(sseu, s, subslice_mask);

		eu_disable = I915_READ(GEN9_EU_DISABLE(s));
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			int eu_per_ss;
			u8 eu_disabled_mask;

			if (!intel_sseu_has_subslice(sseu, s, ss))
				/* skip disabled subslice */
				continue;

			/* 8 EU-disable bits per subslice in this register. */
			eu_disabled_mask = (eu_disable >> (ss * 8)) & eu_mask;

			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);

			eu_per_ss = sseu->max_eus_per_subslice -
				hweight8(eu_disabled_mask);

			/*
			 * Record which subslice(s) has(have) 7 EUs. we
			 * can tune the hash used to spread work among
			 * subslices if they are unbalanced.
			 */
			if (eu_per_ss == 7)
				sseu->subslice_7eu[s] |= BIT(ss);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * SKL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery. BXT is expected to be perfectly uniform in EU
	 * distribution.
	*/
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     intel_sseu_subslice_total(sseu)) :
				0;
	/*
	 * SKL+ supports slice power gating on devices with more than
	 * one slice, and supports EU power gating on devices with
	 * more than one EU pair per subslice. BXT+ supports subslice
	 * power gating on devices with more than one subslice, and
	 * supports EU power gating on devices with more than one EU
	 * pair per subslice.
	*/
	sseu->has_slice_pg =
		!IS_GEN9_LP(dev_priv) && hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg =
		IS_GEN9_LP(dev_priv) && intel_sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = sseu->eu_per_subslice > 2;

	/* BXT/GLK pooled-EU: min pool size depends on which subslices live. */
	if (IS_GEN9_LP(dev_priv)) {
#define IS_SS_DISABLED(ss)	(!(sseu->subslice_mask[0] & BIT(ss)))
		info->has_pooled_eu = hweight8(sseu->subslice_mask[0]) == 3;

		sseu->min_eu_in_pool = 0;
		if (info->has_pooled_eu) {
			if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
				sseu->min_eu_in_pool = 3;
			else if (IS_SS_DISABLED(1))
				sseu->min_eu_in_pool = 6;
			else
				sseu->min_eu_in_pool = 9;
		}
#undef IS_SS_DISABLED
	}
}

/*
 * Read BDW fuse registers and populate the SSEU topology. The per-slice
 * EU disable bits straddle 32-bit register boundaries, hence the
 * shift-and-stitch when filling eu_disable[].
 */
static void bdw_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	int s, ss;
	u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */

	fuse2 = I915_READ(GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
	intel_sseu_set_info(sseu, 3, 3, 8);

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	subslice_mask = GENMASK(sseu->max_subslices - 1, 0);
	subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
			   GEN8_F2_SS_DIS_SHIFT);

	eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
	eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
			 (32 - GEN8_EU_DIS0_S1_SHIFT));
	eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
			 (32 - GEN8_EU_DIS1_S2_SHIFT));

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		intel_sseu_set_subslices(sseu, s, subslice_mask);

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			u8 eu_disabled_mask;
			u32 n_disabled;

			if (!intel_sseu_has_subslice(sseu, s, ss))
				/* skip disabled subslice */
				continue;

			eu_disabled_mask =
				eu_disable[s] >> (ss * sseu->max_eus_per_subslice);

			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);

			n_disabled = hweight8(eu_disabled_mask);

			/*
			 * Record which subslices have 7 EUs.
			 */
			if (sseu->max_eus_per_subslice - n_disabled == 7)
				sseu->subslice_7eu[s] |= 1 << ss;
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * BDW is expected to always have a uniform distribution of EU across
	 * subslices with the exception that any one EU in any one subslice may
	 * be fused off for die recovery.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     intel_sseu_subslice_total(sseu)) :
				0;

	/*
	 * BDW supports slice power gating on devices with more than
	 * one slice.
	 */
	sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg = 0;
	sseu->has_eu_pg = 0;
}

/*
 * Derive the HSW SSEU topology: slice/subslice counts come from the GT
 * level encoded in the PCI id, EU count per subslice from the PAVP fuse.
 */
static void hsw_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u32 fuse1;
	u8 subslice_mask = 0;
	int s, ss;

	/*
	 * There isn't a register to tell us how many slices/subslices. We
	 * work off the PCI-ids here.
	 */
	switch (INTEL_INFO(dev_priv)->gt) {
	default:
		MISSING_CASE(INTEL_INFO(dev_priv)->gt);
		/* fall through */
	case 1:
		sseu->slice_mask = BIT(0);
		subslice_mask = BIT(0);
		break;
	case 2:
		sseu->slice_mask = BIT(0);
		subslice_mask = BIT(0) | BIT(1);
		break;
	case 3:
		sseu->slice_mask = BIT(0) | BIT(1);
		subslice_mask = BIT(0) | BIT(1);
		break;
	}

	fuse1 = I915_READ(HSW_PAVP_FUSE1);
	switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) {
	default:
		MISSING_CASE((fuse1 & HSW_F1_EU_DIS_MASK) >>
			     HSW_F1_EU_DIS_SHIFT);
		/* fall through */
	case HSW_F1_EU_DIS_10EUS:
		sseu->eu_per_subslice = 10;
		break;
	case HSW_F1_EU_DIS_8EUS:
		sseu->eu_per_subslice = 8;
		break;
	case HSW_F1_EU_DIS_6EUS:
		sseu->eu_per_subslice = 6;
		break;
	}

	intel_sseu_set_info(sseu, hweight8(sseu->slice_mask),
			    hweight8(subslice_mask),
			    sseu->eu_per_subslice);

	/* All EUs of every present subslice are enabled on HSW. */
	for (s = 0; s < sseu->max_slices; s++) {
		intel_sseu_set_subslices(sseu, s, subslice_mask);

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			sseu_set_eus(sseu, s, ss,
				     (1UL << sseu->eu_per_subslice) - 1);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/* No powergating for you. */
	sseu->has_slice_pg = 0;
	sseu->has_subslice_pg = 0;
	sseu->has_eu_pg = 0;
}

/*
 * Compute the timestamp reference frequency (in Hz) from the divider and
 * denominator fields of the TIMESTAMP_OVERRIDE register.
 */
static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
{
	u32 ts_override = I915_READ(GEN9_TIMESTAMP_OVERRIDE);
	u32 base_freq, frac_freq;

	base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
	base_freq *= 1000000;

	frac_freq = ((ts_override &
		      GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
	frac_freq = 1000000 / (frac_freq + 1);

	return base_freq + frac_freq;
}

693 694 695
static u32 gen10_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
					u32 rpm_config_reg)
{
696 697
	u32 f19_2_mhz = 19200000;
	u32 f24_mhz = 24000000;
698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715
	u32 crystal_clock = (rpm_config_reg &
			     GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
			    GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;

	switch (crystal_clock) {
	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
		return f19_2_mhz;
	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
		return f24_mhz;
	default:
		MISSING_CASE(crystal_clock);
		return 0;
	}
}

/*
 * Decode the gen11 crystal clock frequency field of RPM_CONFIG0 into a
 * frequency in Hz; 0 (with a MISSING_CASE) on unknown encodings.
 */
static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
					u32 rpm_config_reg)
{
	u32 crystal_clock = (rpm_config_reg &
			     GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
			    GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;

	switch (crystal_clock) {
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
		return 24000000;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
		return 19200000;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
		return 38400000;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
		return 25000000;
	default:
		MISSING_CASE(crystal_clock);
		return 0;
	}
}

/*
 * Determine the command streamer timestamp frequency (in Hz) for the
 * device's generation; returns 0 (with a MISSING_CASE) past gen12.
 */
static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
{
	u32 f12_5_mhz = 12500000;
	u32 f19_2_mhz = 19200000;
	u32 f24_mhz = 24000000;

	if (INTEL_GEN(dev_priv) <= 4) {
		/* PRMs say:
		 *
		 *     "The value in this register increments once every 16
		 *      hclks." (through the “Clocking Configuration”
		 *      (“CLKCFG”) MCHBAR register)
		 */
		return RUNTIME_INFO(dev_priv)->rawclk_freq * 1000 / 16;
	} else if (INTEL_GEN(dev_priv) <= 8) {
		/* PRMs say:
		 *
		 *     "The PCU TSC counts 10ns increments; this timestamp
		 *      reflects bits 38:3 of the TSC (i.e. 80ns granularity,
		 *      rolling over every 1.5 hours).
		 */
		return f12_5_mhz;
	} else if (INTEL_GEN(dev_priv) <= 9) {
		u32 ctc_reg = I915_READ(CTC_MODE);
		u32 freq = 0;

		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
			freq = read_reference_ts_freq(dev_priv);
		} else {
			freq = IS_GEN9_LP(dev_priv) ? f19_2_mhz : f24_mhz;

			/* Now figure out how the command stream's timestamp
			 * register increments from this frequency (it might
			 * increment only every few clock cycle).
			 */
			freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
				      CTC_SHIFT_PARAMETER_SHIFT);
		}

		return freq;
	} else if (INTEL_GEN(dev_priv) <= 12) {
		u32 ctc_reg = I915_READ(CTC_MODE);
		u32 freq = 0;

		/* First figure out the reference frequency. There are 2 ways
		 * we can compute the frequency, either through the
		 * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
		 * tells us which one we should use.
		 */
		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
			freq = read_reference_ts_freq(dev_priv);
		} else {
			u32 rpm_config_reg = I915_READ(RPM_CONFIG0);

			if (INTEL_GEN(dev_priv) <= 10)
				freq = gen10_get_crystal_clock_freq(dev_priv,
								rpm_config_reg);
			else
				freq = gen11_get_crystal_clock_freq(dev_priv,
								rpm_config_reg);

			/* Now figure out how the command stream's timestamp
			 * register increments from this frequency (it might
			 * increment only every few clock cycle).
			 */
			freq >>= 3 - ((rpm_config_reg &
				       GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
				      GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
		}

		return freq;
	}

	MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n");
	return 0;
}

/*
 * Subplatform PCI-id tables. INTEL_VGA_DEVICE is re-defined so that the
 * id-list macros from i915_pciids.h expand to bare device ids instead of
 * pci_device_id initializers.
 */
#undef INTEL_VGA_DEVICE
#define INTEL_VGA_DEVICE(id, info) (id)

static const u16 subplatform_ult_ids[] = {
	INTEL_HSW_ULT_GT1_IDS(0),
	INTEL_HSW_ULT_GT2_IDS(0),
	INTEL_HSW_ULT_GT3_IDS(0),
	INTEL_BDW_ULT_GT1_IDS(0),
	INTEL_BDW_ULT_GT2_IDS(0),
	INTEL_BDW_ULT_GT3_IDS(0),
	INTEL_BDW_ULT_RSVD_IDS(0),
	INTEL_SKL_ULT_GT1_IDS(0),
	INTEL_SKL_ULT_GT2_IDS(0),
	INTEL_SKL_ULT_GT3_IDS(0),
	INTEL_KBL_ULT_GT1_IDS(0),
	INTEL_KBL_ULT_GT2_IDS(0),
	INTEL_KBL_ULT_GT3_IDS(0),
	INTEL_CFL_U_GT2_IDS(0),
	INTEL_CFL_U_GT3_IDS(0),
	INTEL_WHL_U_GT1_IDS(0),
	INTEL_WHL_U_GT2_IDS(0),
	INTEL_WHL_U_GT3_IDS(0),
	INTEL_CML_U_GT1_IDS(0),
	INTEL_CML_U_GT2_IDS(0),
};

static const u16 subplatform_ulx_ids[] = {
	INTEL_HSW_ULX_GT1_IDS(0),
	INTEL_HSW_ULX_GT2_IDS(0),
	INTEL_BDW_ULX_GT1_IDS(0),
	INTEL_BDW_ULX_GT2_IDS(0),
	INTEL_BDW_ULX_GT3_IDS(0),
	INTEL_BDW_ULX_RSVD_IDS(0),
	INTEL_SKL_ULX_GT1_IDS(0),
	INTEL_SKL_ULX_GT2_IDS(0),
	INTEL_KBL_ULX_GT1_IDS(0),
	INTEL_KBL_ULX_GT2_IDS(0),
	INTEL_AML_KBL_GT2_IDS(0),
	INTEL_AML_CFL_GT2_IDS(0),
};

static const u16 subplatform_portf_ids[] = {
	INTEL_CNL_PORT_F_IDS(0),
	INTEL_ICL_PORT_F_IDS(0),
};

/* Linear scan: return true iff @id appears in the @num-entry table @p. */
static bool find_devid(u16 id, const u16 *p, unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		if (p[i] == id)
			return true;
	}

	return false;
}

/*
 * Initialize the platform mask and mark subplatform bits (ULT/ULX/port F)
 * by matching the PCI device id against the subplatform id tables.
 */
void intel_device_info_subplatform_init(struct drm_i915_private *i915)
{
	const struct intel_device_info *info = INTEL_INFO(i915);
	const struct intel_runtime_info *rinfo = RUNTIME_INFO(i915);
	const unsigned int pi = __platform_mask_index(rinfo, info->platform);
	const unsigned int pb = __platform_mask_bit(rinfo, info->platform);
	u16 devid = INTEL_DEVID(i915);
	u32 mask = 0;

	/* Make sure IS_<platform> checks are working. */
	RUNTIME_INFO(i915)->platform_mask[pi] = BIT(pb);

	/* Find and mark subplatform bits based on the PCI device id. */
	if (find_devid(devid, subplatform_ult_ids,
		       ARRAY_SIZE(subplatform_ult_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_ULT);
	} else if (find_devid(devid, subplatform_ulx_ids,
			      ARRAY_SIZE(subplatform_ulx_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_ULX);
		if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
			/* ULX machines are also considered ULT. */
			mask |= BIT(INTEL_SUBPLATFORM_ULT);
		}
	} else if (find_devid(devid, subplatform_portf_ids,
			      ARRAY_SIZE(subplatform_portf_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_PORTF);
	}

	GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_BITS);

	RUNTIME_INFO(i915)->platform_mask[pi] |= mask;
}

905 906
/**
 * intel_device_info_runtime_init - initialize runtime info
907
 * @dev_priv: the i915 device
908
 *
909 910 911 912 913 914 915 916 917 918 919 920
 * Determine various intel_device_info fields at runtime.
 *
 * Use it when either:
 *   - it's judged too laborious to fill n static structures with the limit
 *     when a simple if statement does the job,
 *   - run-time checks (eg read fuse/strap registers) are needed.
 *
 * This function needs to be called:
 *   - after the MMIO has been setup as we are reading registers,
 *   - after the PCH has been detected,
 *   - before the first usage of the fields it can tweak.
 */
921
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
922
{
923
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
924
	struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv);
925 926
	enum pipe pipe;

927 928
	if (INTEL_GEN(dev_priv) >= 10) {
		for_each_pipe(dev_priv, pipe)
929
			runtime->num_scalers[pipe] = 2;
930
	} else if (IS_GEN(dev_priv, 9)) {
931 932 933
		runtime->num_scalers[PIPE_A] = 2;
		runtime->num_scalers[PIPE_B] = 2;
		runtime->num_scalers[PIPE_C] = 1;
934 935
	}

936
	BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES);
937

938 939 940 941
	if (IS_ROCKETLAKE(dev_priv))
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 4;
	else if (INTEL_GEN(dev_priv) >= 11)
942
		for_each_pipe(dev_priv, pipe)
943
			runtime->num_sprites[pipe] = 6;
944
	else if (IS_GEN(dev_priv, 10) || IS_GEMINILAKE(dev_priv))
945
		for_each_pipe(dev_priv, pipe)
946
			runtime->num_sprites[pipe] = 3;
947
	else if (IS_BROXTON(dev_priv)) {
948 949 950 951 952 953 954 955 956
		/*
		 * Skylake and Broxton currently don't expose the topmost plane as its
		 * use is exclusive with the legacy cursor and we only want to expose
		 * one of those, not both. Until we can safely expose the topmost plane
		 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
		 * we don't expose the topmost plane at all to prevent ABI breakage
		 * down the line.
		 */

957 958 959
		runtime->num_sprites[PIPE_A] = 2;
		runtime->num_sprites[PIPE_B] = 2;
		runtime->num_sprites[PIPE_C] = 1;
960
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
961
		for_each_pipe(dev_priv, pipe)
962
			runtime->num_sprites[pipe] = 2;
963
	} else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
964
		for_each_pipe(dev_priv, pipe)
965
			runtime->num_sprites[pipe] = 1;
966
	}
967

968 969
	if (HAS_DISPLAY(dev_priv) && IS_GEN_RANGE(dev_priv, 7, 8) &&
	    HAS_PCH_SPLIT(dev_priv)) {
970 971 972 973 974 975 976 977 978 979 980 981 982 983
		u32 fuse_strap = I915_READ(FUSE_STRAP);
		u32 sfuse_strap = I915_READ(SFUSE_STRAP);

		/*
		 * SFUSE_STRAP is supposed to have a bit signalling the display
		 * is fused off. Unfortunately it seems that, at least in
		 * certain cases, fused off display means that PCH display
		 * reads don't land anywhere. In that case, we read 0s.
		 *
		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
		 * should be set when taking over after the firmware.
		 */
		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
984
		    (HAS_PCH_CPT(dev_priv) &&
985
		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
986 987
			drm_info(&dev_priv->drm,
				 "Display fused off, disabling\n");
988
			info->pipe_mask = 0;
989
			info->cpu_transcoder_mask = 0;
990
		} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
991
			drm_info(&dev_priv->drm, "PipeC fused off\n");
992
			info->pipe_mask &= ~BIT(PIPE_C);
993
			info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
994
		}
995
	} else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
996 997
		u32 dfsm = I915_READ(SKL_DFSM);

998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014
		if (dfsm & SKL_DFSM_PIPE_A_DISABLE) {
			info->pipe_mask &= ~BIT(PIPE_A);
			info->cpu_transcoder_mask &= ~BIT(TRANSCODER_A);
		}
		if (dfsm & SKL_DFSM_PIPE_B_DISABLE) {
			info->pipe_mask &= ~BIT(PIPE_B);
			info->cpu_transcoder_mask &= ~BIT(TRANSCODER_B);
		}
		if (dfsm & SKL_DFSM_PIPE_C_DISABLE) {
			info->pipe_mask &= ~BIT(PIPE_C);
			info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
		}
		if (INTEL_GEN(dev_priv) >= 12 &&
		    (dfsm & TGL_DFSM_PIPE_D_DISABLE)) {
			info->pipe_mask &= ~BIT(PIPE_D);
			info->cpu_transcoder_mask &= ~BIT(TRANSCODER_D);
		}
1015 1016 1017

		if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE)
			info->display.has_hdcp = 0;
1018 1019 1020

		if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE)
			info->display.has_fbc = 0;
1021 1022 1023

		if (INTEL_GEN(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
			info->display.has_csr = 0;
1024 1025 1026 1027

		if (INTEL_GEN(dev_priv) >= 10 &&
		    (dfsm & CNL_DFSM_DISPLAY_DSC_DISABLE))
			info->display.has_dsc = 0;
1028 1029 1030
	}

	/* Initialize slice/subslice/EU info */
1031
	if (IS_HASWELL(dev_priv))
1032
		hsw_sseu_info_init(dev_priv);
1033
	else if (IS_CHERRYVIEW(dev_priv))
1034 1035
		cherryview_sseu_info_init(dev_priv);
	else if (IS_BROADWELL(dev_priv))
1036
		bdw_sseu_info_init(dev_priv);
1037
	else if (IS_GEN(dev_priv, 9))
1038
		gen9_sseu_info_init(dev_priv);
1039
	else if (IS_GEN(dev_priv, 10))
1040
		gen10_sseu_info_init(dev_priv);
1041
	else if (IS_GEN(dev_priv, 11))
1042
		gen11_sseu_info_init(dev_priv);
1043 1044
	else if (INTEL_GEN(dev_priv) >= 12)
		gen12_sseu_info_init(dev_priv);
1045

1046
	if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
1047 1048
		drm_info(&dev_priv->drm,
			 "Disabling ppGTT for VT-d support\n");
1049
		info->ppgtt_type = INTEL_PPGTT_NONE;
1050 1051
	}

1052 1053 1054
	runtime->rawclk_freq = intel_read_rawclk(dev_priv);
	drm_dbg(&dev_priv->drm, "rawclk rate: %d kHz\n", runtime->rawclk_freq);

1055
	/* Initialize command stream timestamp frequency */
1056
	runtime->cs_timestamp_frequency_hz =
1057
		read_timestamp_frequency(dev_priv);
1058
	if (runtime->cs_timestamp_frequency_hz) {
1059
		runtime->cs_timestamp_period_ns =
1060
			i915_cs_timestamp_ticks_to_ns(dev_priv, 1);
1061 1062 1063 1064 1065 1066
		drm_dbg(&dev_priv->drm,
			"CS timestamp wraparound in %lldms\n",
			div_u64(mul_u32_u32(runtime->cs_timestamp_period_ns,
					    S32_MAX),
				USEC_PER_SEC));
	}
1067
}
1068 1069 1070 1071

void intel_driver_caps_print(const struct intel_driver_caps *caps,
			     struct drm_printer *p)
{
1072 1073
	drm_printf(p, "Has logical contexts? %s\n",
		   yesno(caps->has_logical_contexts));
1074 1075
	drm_printf(p, "scheduler: %x\n", caps->scheduler);
}
1076 1077 1078 1079 1080 1081 1082 1083 1084 1085

/*
 * Determine which engines are fused off in our particular hardware. Since the
 * fuse register is in the blitter powerwell, we need forcewake to be ready at
 * this point (but later we need to prune the forcewake domains for engines that
 * are indeed fused off).
 */
void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
1086
	unsigned int logical_vdbox = 0;
1087
	unsigned int i;
1088
	u32 media_fuse;
1089 1090
	u16 vdbox_mask;
	u16 vebox_mask;
1091 1092 1093 1094

	if (INTEL_GEN(dev_priv) < 11)
		return;

1095
	media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);
1096

1097 1098 1099
	vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
	vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
		      GEN11_GT_VEBOX_DISABLE_SHIFT;
1100 1101

	for (i = 0; i < I915_MAX_VCS; i++) {
1102 1103
		if (!HAS_ENGINE(dev_priv, _VCS(i))) {
			vdbox_mask &= ~BIT(i);
1104
			continue;
1105
		}
1106

1107
		if (!(BIT(i) & vdbox_mask)) {
1108
			info->engine_mask &= ~BIT(_VCS(i));
1109
			drm_dbg(&dev_priv->drm, "vcs%u fused off\n", i);
1110
			continue;
1111
		}
1112 1113 1114 1115

		/*
		 * In Gen11, only even numbered logical VDBOXes are
		 * hooked up to an SFC (Scaler & Format Converter) unit.
1116
		 * In TGL each VDBOX has access to an SFC.
1117
		 */
1118
		if (INTEL_GEN(dev_priv) >= 12 || logical_vdbox++ % 2 == 0)
1119
			RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
1120
	}
1121 1122
	drm_dbg(&dev_priv->drm, "vdbox enable: %04x, instances: %04lx\n",
		vdbox_mask, VDBOX_MASK(dev_priv));
1123
	GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv));
1124 1125

	for (i = 0; i < I915_MAX_VECS; i++) {
1126 1127
		if (!HAS_ENGINE(dev_priv, _VECS(i))) {
			vebox_mask &= ~BIT(i);
1128
			continue;
1129
		}
1130

1131
		if (!(BIT(i) & vebox_mask)) {
1132
			info->engine_mask &= ~BIT(_VECS(i));
1133
			drm_dbg(&dev_priv->drm, "vecs%u fused off\n", i);
1134
		}
1135
	}
1136 1137
	drm_dbg(&dev_priv->drm, "vebox enable: %04x, instances: %04lx\n",
		vebox_mask, VEBOX_MASK(dev_priv));
1138
	GEM_BUG_ON(vebox_mask != VEBOX_MASK(dev_priv));
1139
}