/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drm_print.h>

#include "intel_device_info.h"
#include "i915_drv.h"

#define PLATFORM_NAME(x) [INTEL_##x] = #x
static const char * const platform_names[] = {
	PLATFORM_NAME(I830),
	PLATFORM_NAME(I845G),
	PLATFORM_NAME(I85X),
	PLATFORM_NAME(I865G),
	PLATFORM_NAME(I915G),
	PLATFORM_NAME(I915GM),
	PLATFORM_NAME(I945G),
	PLATFORM_NAME(I945GM),
	PLATFORM_NAME(G33),
	PLATFORM_NAME(PINEVIEW),
	PLATFORM_NAME(I965G),
	PLATFORM_NAME(I965GM),
	PLATFORM_NAME(G45),
	PLATFORM_NAME(GM45),
	PLATFORM_NAME(IRONLAKE),
	PLATFORM_NAME(SANDYBRIDGE),
	PLATFORM_NAME(IVYBRIDGE),
	PLATFORM_NAME(VALLEYVIEW),
	PLATFORM_NAME(HASWELL),
	PLATFORM_NAME(BROADWELL),
	PLATFORM_NAME(CHERRYVIEW),
	PLATFORM_NAME(SKYLAKE),
	PLATFORM_NAME(BROXTON),
	PLATFORM_NAME(KABYLAKE),
	PLATFORM_NAME(GEMINILAKE),
	PLATFORM_NAME(COFFEELAKE),
	PLATFORM_NAME(CANNONLAKE),
	PLATFORM_NAME(ICELAKE),
	PLATFORM_NAME(ELKHARTLAKE),
	PLATFORM_NAME(TIGERLAKE),
};
#undef PLATFORM_NAME

const char *intel_platform_name(enum intel_platform platform)
{
	BUILD_BUG_ON(ARRAY_SIZE(platform_names) != INTEL_MAX_PLATFORMS);

	if (WARN_ON_ONCE(platform >= ARRAY_SIZE(platform_names) ||
			 platform_names[platform] == NULL))
		return "<unknown>";

	return platform_names[platform];
}

void intel_device_info_dump_flags(const struct intel_device_info *info,
				  struct drm_printer *p)
{
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG

#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->display.name));
	DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
}

static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
{
	int s;

	drm_printf(p, "slice total: %u, mask=%04x\n",
		   hweight8(sseu->slice_mask), sseu->slice_mask);
	drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu));
	for (s = 0; s < sseu->max_slices; s++) {
		drm_printf(p, "slice%d: %u subslices, mask=%08x\n",
			   s, intel_sseu_subslices_per_slice(sseu, s),
			   intel_sseu_get_subslices(sseu, s));
	}
	drm_printf(p, "EU total: %u\n", sseu->eu_total);
	drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
	drm_printf(p, "has slice power gating: %s\n",
		   yesno(sseu->has_slice_pg));
	drm_printf(p, "has subslice power gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
}

void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
				    struct drm_printer *p)
{
	sseu_dump(&info->sseu, p);

	drm_printf(p, "CS timestamp frequency: %u kHz\n",
		   info->cs_timestamp_frequency_khz);
}

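/*
 * The per-subslice EU masks are packed into the single byte array
 * sseu->eu_mask, eu_stride bytes per subslice. This returns the byte
 * offset at which a given slice/subslice's mask starts; e.g. with
 * max_subslices = 8 and eu_stride = 1, slice 1 / subslice 2 starts
 * at byte 1 * 8 + 2 = 10.
 */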
static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice,
		       int subslice)
{
	int slice_stride = sseu->max_subslices * sseu->eu_stride;

	return slice * slice_stride + subslice * sseu->eu_stride;
}

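/*
 * Reassemble one subslice's 16-bit EU mask from the packed byte array
 * (least significant byte first); sseu_set_eus() below performs the
 * inverse, scattering a mask back into the array.
 */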
static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
			int subslice)
{
	int i, offset = sseu_eu_idx(sseu, slice, subslice);
	u16 eu_mask = 0;

	for (i = 0; i < sseu->eu_stride; i++) {
		eu_mask |= ((u16)sseu->eu_mask[offset + i]) <<
			(i * BITS_PER_BYTE);
	}

	return eu_mask;
}

static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
			 u16 eu_mask)
{
	int i, offset = sseu_eu_idx(sseu, slice, subslice);

	for (i = 0; i < sseu->eu_stride; i++) {
		sseu->eu_mask[offset + i] =
			(eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
	}
}

void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
				     struct drm_printer *p)
{
	int s, ss;

	if (sseu->max_slices == 0) {
		drm_printf(p, "Unavailable\n");
		return;
	}

	for (s = 0; s < sseu->max_slices; s++) {
		drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n",
			   s, intel_sseu_subslices_per_slice(sseu, s),
			   intel_sseu_get_subslices(sseu, s));

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			u16 enabled_eus = sseu_get_eus(sseu, s, ss);

			drm_printf(p, "\tsubslice%d: %u EUs (0x%hx)\n",
				   ss, hweight16(enabled_eus), enabled_eus);
		}
	}
}

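/* Total enabled EUs: population count over every byte of eu_mask. */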
static u16 compute_eu_total(const struct sseu_dev_info *sseu)
{
	u16 i, total = 0;

	for (i = 0; i < ARRAY_SIZE(sseu->eu_mask); i++)
		total += hweight8(sseu->eu_mask[i]);

	return total;
}

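/*
 * Gen11: topology is read from the SLICE_ENABLE, SUBSLICE_DISABLE and
 * EU_DISABLE fuses. EHL exposes a single slice with up to 4 subslices
 * of 8 EUs; other gen11 parts a single slice with up to 8 subslices.
 * The same EU-disable mask applies to every enabled subslice.
 */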
static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u8 s_en;
	u32 ss_en, ss_en_mask;
	u8 eu_en;
	int s;

	if (IS_ELKHARTLAKE(dev_priv))
		intel_sseu_set_info(sseu, 1, 4, 8);
	else
		intel_sseu_set_info(sseu, 1, 8, 8);

	s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
	ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE);
	ss_en_mask = BIT(sseu->max_subslices) - 1;
	eu_en = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK);

	for (s = 0; s < sseu->max_slices; s++) {
		if (s_en & BIT(s)) {
			int ss_idx = sseu->max_subslices * s;
			int ss;

			sseu->slice_mask |= BIT(s);

			intel_sseu_set_subslices(sseu, s, (ss_en >> ss_idx) &
							  ss_en_mask);

			for (ss = 0; ss < sseu->max_subslices; ss++)
				if (intel_sseu_has_subslice(sseu, s, ss))
					sseu_set_eus(sseu, s, ss, eu_en);
		}
	}
	sseu->eu_per_subslice = hweight8(eu_en);
	sseu->eu_total = compute_eu_total(sseu);

	/* ICL has no power gating restrictions. */
	sseu->has_slice_pg = 1;
	sseu->has_subslice_pg = 1;
	sseu->has_eu_pg = 1;
}

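/*
 * Gen10 (CNL): the slice mask comes from GEN8_FUSE2, while the
 * per-subslice EU masks are spread across the four EU_DISABLE
 * registers, 8 bits per subslice.
 */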
static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	const u32 fuse2 = I915_READ(GEN8_FUSE2);
	int s, ss;
	const int eu_mask = 0xff;
	u32 subslice_mask, eu_en;

	intel_sseu_set_info(sseu, 6, 4, 8);

	sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >>
			    GEN10_F2_S_ENA_SHIFT;

	/* Slice0 */
	eu_en = ~I915_READ(GEN8_EU_DISABLE0);
	for (ss = 0; ss < sseu->max_subslices; ss++)
		sseu_set_eus(sseu, 0, ss, (eu_en >> (8 * ss)) & eu_mask);
	/* Slice1 */
	sseu_set_eus(sseu, 1, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN8_EU_DISABLE1);
	sseu_set_eus(sseu, 1, 1, eu_en & eu_mask);
	/* Slice2 */
	sseu_set_eus(sseu, 2, 0, (eu_en >> 8) & eu_mask);
	sseu_set_eus(sseu, 2, 1, (eu_en >> 16) & eu_mask);
	/* Slice3 */
	sseu_set_eus(sseu, 3, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN8_EU_DISABLE2);
	sseu_set_eus(sseu, 3, 1, eu_en & eu_mask);
	/* Slice4 */
	sseu_set_eus(sseu, 4, 0, (eu_en >> 8) & eu_mask);
	sseu_set_eus(sseu, 4, 1, (eu_en >> 16) & eu_mask);
	/* Slice5 */
	sseu_set_eus(sseu, 5, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN10_EU_DISABLE3);
	sseu_set_eus(sseu, 5, 1, eu_en & eu_mask);

	subslice_mask = (1 << 4) - 1;
	subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
			   GEN10_F2_SS_DIS_SHIFT);

	for (s = 0; s < sseu->max_slices; s++) {
		u32 subslice_mask_with_eus = subslice_mask;

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			if (sseu_get_eus(sseu, s, ss) == 0)
				subslice_mask_with_eus &= ~BIT(ss);
		}

		/*
		 * Slice0 can have up to 3 subslices, but there are only 2 in
		 * slice1/2.
		 */
		intel_sseu_set_subslices(sseu, s, s == 0 ?
						  subslice_mask_with_eus :
						  subslice_mask_with_eus & 0x3);
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * CNL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     intel_sseu_subslice_total(sseu)) :
				0;

	/* No restrictions on Power Gating */
	sseu->has_slice_pg = 1;
	sseu->has_subslice_pg = 1;
	sseu->has_eu_pg = 1;
}

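/*
 * CHV has a single slice with at most two subslices; each subslice's
 * EU-disable bits are split across two 4-bit fields of CHV_FUSE_GT.
 */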
static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u32 fuse;
	u8 subslice_mask = 0;

	fuse = I915_READ(CHV_FUSE_GT);

	sseu->slice_mask = BIT(0);
	intel_sseu_set_info(sseu, 1, 2, 8);

	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
		u8 disabled_mask =
			((fuse & CHV_FGT_EU_DIS_SS0_R0_MASK) >>
			 CHV_FGT_EU_DIS_SS0_R0_SHIFT) |
			(((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
			  CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);

		subslice_mask |= BIT(0);
		sseu_set_eus(sseu, 0, 0, ~disabled_mask);
	}

	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
		u8 disabled_mask =
			((fuse & CHV_FGT_EU_DIS_SS1_R0_MASK) >>
			 CHV_FGT_EU_DIS_SS1_R0_SHIFT) |
			(((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
			  CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);

		subslice_mask |= BIT(1);
		sseu_set_eus(sseu, 0, 1, ~disabled_mask);
	}

	intel_sseu_set_subslices(sseu, 0, subslice_mask);

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * CHV is expected to always have a uniform distribution of EU
	 * across subslices.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				sseu->eu_total /
					intel_sseu_subslice_total(sseu) :
				0;
	/*
	 * CHV supports subslice power gating on devices with more than
	 * one subslice, and supports EU power gating on devices with
	 * more than one EU pair per subslice.
	 */
	sseu->has_slice_pg = 0;
	sseu->has_subslice_pg = intel_sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
}

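/*
 * Gen9: slice and subslice masks come from GEN8_FUSE2, and each
 * enabled slice has its own GEN9_EU_DISABLE(s) register holding one
 * byte per subslice.
 */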
static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	int s, ss;
	u32 fuse2, eu_disable, subslice_mask;
	const u8 eu_mask = 0xff;

	fuse2 = I915_READ(GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;

	/* BXT has a single slice and at most 3 subslices. */
	intel_sseu_set_info(sseu, IS_GEN9_LP(dev_priv) ? 1 : 3,
			    IS_GEN9_LP(dev_priv) ? 3 : 4, 8);

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	subslice_mask = (1 << sseu->max_subslices) - 1;
	subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
			   GEN9_F2_SS_DIS_SHIFT);

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		intel_sseu_set_subslices(sseu, s, subslice_mask);

		eu_disable = I915_READ(GEN9_EU_DISABLE(s));
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			int eu_per_ss;
			u8 eu_disabled_mask;

			if (!intel_sseu_has_subslice(sseu, s, ss))
				/* skip disabled subslice */
				continue;

			eu_disabled_mask = (eu_disable >> (ss * 8)) & eu_mask;

			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);

			eu_per_ss = sseu->max_eus_per_subslice -
				hweight8(eu_disabled_mask);

			/*
			 * Record which subslice(s) have 7 EUs. We can
			 * tune the hash used to spread work among
			 * subslices if they are unbalanced.
			 */
			if (eu_per_ss == 7)
				sseu->subslice_7eu[s] |= BIT(ss);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * SKL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery. BXT is expected to be perfectly uniform in EU
	 * distribution.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     intel_sseu_subslice_total(sseu)) :
				0;
	/*
	 * SKL+ supports slice power gating on devices with more than
	 * one slice, and supports EU power gating on devices with
	 * more than one EU pair per subslice. BXT+ supports subslice
	 * power gating on devices with more than one subslice, and
	 * supports EU power gating on devices with more than one EU
	 * pair per subslice.
	 */
	sseu->has_slice_pg =
		!IS_GEN9_LP(dev_priv) && hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg =
		IS_GEN9_LP(dev_priv) && intel_sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = sseu->eu_per_subslice > 2;

	if (IS_GEN9_LP(dev_priv)) {
#define IS_SS_DISABLED(ss)	(!(sseu->subslice_mask[0] & BIT(ss)))
		info->has_pooled_eu = hweight8(sseu->subslice_mask[0]) == 3;

		sseu->min_eu_in_pool = 0;
		if (info->has_pooled_eu) {
			if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
				sseu->min_eu_in_pool = 3;
			else if (IS_SS_DISABLED(1))
				sseu->min_eu_in_pool = 6;
			else
				sseu->min_eu_in_pool = 9;
		}
#undef IS_SS_DISABLED
	}
}

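/*
 * BDW: like gen9 the fuses live in GEN8_FUSE2, but the EU-disable
 * bits span three registers and have to be stitched back together
 * per slice before use.
 */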
static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	int s, ss;
	u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */

	fuse2 = I915_READ(GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
	intel_sseu_set_info(sseu, 3, 3, 8);

472 473 474 475
	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	subslice_mask = GENMASK(sseu->max_subslices - 1, 0);
	subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
			   GEN8_F2_SS_DIS_SHIFT);

	eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
	eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
			 (32 - GEN8_EU_DIS0_S1_SHIFT));
	eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
			 (32 - GEN8_EU_DIS1_S2_SHIFT));

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		intel_sseu_set_subslices(sseu, s, subslice_mask);

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			u8 eu_disabled_mask;
			u32 n_disabled;

			if (!intel_sseu_has_subslice(sseu, s, ss))
				/* skip disabled subslice */
				continue;

			eu_disabled_mask =
				eu_disable[s] >> (ss * sseu->max_eus_per_subslice);

			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);

			n_disabled = hweight8(eu_disabled_mask);

			/*
			 * Record which subslices have 7 EUs.
			 */
			if (sseu->max_eus_per_subslice - n_disabled == 7)
				sseu->subslice_7eu[s] |= 1 << ss;
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * BDW is expected to always have a uniform distribution of EU across
	 * subslices with the exception that any one EU in any one subslice may
	 * be fused off for die recovery.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     intel_sseu_subslice_total(sseu)) :
				0;

	/*
	 * BDW supports slice power gating on devices with more than
	 * one slice.
	 */
	sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg = 0;
	sseu->has_eu_pg = 0;
}

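/*
 * HSW has no topology fuses: slice/subslice counts are derived from
 * the GT level encoded in the PCI ID, and only the EU count per
 * subslice comes from a fuse (HSW_PAVP_FUSE1).
 */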
static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u32 fuse1;
	u8 subslice_mask = 0;
	int s, ss;

	/*
	 * There isn't a register to tell us how many slices/subslices
	 * there are. We work it out from the PCI IDs.
	 */
	switch (INTEL_INFO(dev_priv)->gt) {
	default:
		MISSING_CASE(INTEL_INFO(dev_priv)->gt);
		/* fall through */
	case 1:
		sseu->slice_mask = BIT(0);
		subslice_mask = BIT(0);
		break;
	case 2:
		sseu->slice_mask = BIT(0);
		subslice_mask = BIT(0) | BIT(1);
		break;
	case 3:
		sseu->slice_mask = BIT(0) | BIT(1);
		subslice_mask = BIT(0) | BIT(1);
		break;
	}

	fuse1 = I915_READ(HSW_PAVP_FUSE1);
	switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) {
	default:
		MISSING_CASE((fuse1 & HSW_F1_EU_DIS_MASK) >>
			     HSW_F1_EU_DIS_SHIFT);
		/* fall through */
	case HSW_F1_EU_DIS_10EUS:
		sseu->eu_per_subslice = 10;
		break;
	case HSW_F1_EU_DIS_8EUS:
		sseu->eu_per_subslice = 8;
		break;
	case HSW_F1_EU_DIS_6EUS:
		sseu->eu_per_subslice = 6;
		break;
	}

	intel_sseu_set_info(sseu, hweight8(sseu->slice_mask),
			    hweight8(subslice_mask),
			    sseu->eu_per_subslice);

	for (s = 0; s < sseu->max_slices; s++) {
		intel_sseu_set_subslices(sseu, s, subslice_mask);

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			sseu_set_eus(sseu, s, ss,
				     (1UL << sseu->eu_per_subslice) - 1);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/* No powergating for you. */
	sseu->has_slice_pg = 0;
	sseu->has_subslice_pg = 0;
	sseu->has_eu_pg = 0;
}

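/*
 * Reconstruct the reference timestamp frequency, in kHz, from the
 * integer and fractional divider fields of TIMESTAMP_OVERRIDE.
 */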
static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
{
	u32 ts_override = I915_READ(GEN9_TIMESTAMP_OVERRIDE);
	u32 base_freq, frac_freq;

	base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
	base_freq *= 1000;

	frac_freq = ((ts_override &
		      GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
	frac_freq = 1000 / (frac_freq + 1);

	return base_freq + frac_freq;
}

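/*
 * Map the crystal-clock field of RPM_CONFIG0 to a frequency in kHz;
 * gen10 knows two possible crystals, gen11 four.
 */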
static u32 gen10_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
					u32 rpm_config_reg)
{
	u32 f19_2_mhz = 19200;
	u32 f24_mhz = 24000;
	u32 crystal_clock = (rpm_config_reg &
			     GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
			    GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;

	switch (crystal_clock) {
	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
		return f19_2_mhz;
	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
		return f24_mhz;
	default:
		MISSING_CASE(crystal_clock);
		return 0;
	}
}

static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
					u32 rpm_config_reg)
{
	u32 f19_2_mhz = 19200;
	u32 f24_mhz = 24000;
	u32 f25_mhz = 25000;
	u32 f38_4_mhz = 38400;
	u32 crystal_clock = (rpm_config_reg &
			     GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
			    GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;

	switch (crystal_clock) {
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
		return f24_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
		return f19_2_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
		return f38_4_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
		return f25_mhz;
	default:
		MISSING_CASE(crystal_clock);
		return 0;
	}
}

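/*
 * Return the command stream timestamp frequency in kHz: rawclk / 16
 * up to gen4, a fixed 12.5 MHz through gen8; from gen9 on, CTC_MODE
 * selects between the TIMESTAMP_OVERRIDE divider and a crystal clock,
 * scaled down by a shift parameter.
 */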
static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
{
	u32 f12_5_mhz = 12500;
	u32 f19_2_mhz = 19200;
	u32 f24_mhz = 24000;

	if (INTEL_GEN(dev_priv) <= 4) {
		/* PRMs say:
		 *
		 *     "The value in this register increments once every 16
		 *      hclks." (through the “Clocking Configuration”
		 *      (“CLKCFG”) MCHBAR register)
		 */
		return dev_priv->rawclk_freq / 16;
	} else if (INTEL_GEN(dev_priv) <= 8) {
		/* PRMs say:
		 *
		 *     "The PCU TSC counts 10ns increments; this timestamp
		 *      reflects bits 38:3 of the TSC (i.e. 80ns granularity,
		 *      rolling over every 1.5 hours).
		 */
		return f12_5_mhz;
	} else if (INTEL_GEN(dev_priv) <= 9) {
		u32 ctc_reg = I915_READ(CTC_MODE);
		u32 freq = 0;

		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
			freq = read_reference_ts_freq(dev_priv);
		} else {
			freq = IS_GEN9_LP(dev_priv) ? f19_2_mhz : f24_mhz;

			/* Now figure out how the command stream's timestamp
			 * register increments from this frequency (it might
			 * increment only every few clock cycle).
			 */
			freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
				      CTC_SHIFT_PARAMETER_SHIFT);
		}

		return freq;
	} else if (INTEL_GEN(dev_priv) <= 12) {
		u32 ctc_reg = I915_READ(CTC_MODE);
		u32 freq = 0;

		/* First figure out the reference frequency. There are 2 ways
		 * we can compute the frequency, either through the
		 * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
		 * tells us which one we should use.
		 */
		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
			freq = read_reference_ts_freq(dev_priv);
		} else {
			u32 rpm_config_reg = I915_READ(RPM_CONFIG0);

			if (INTEL_GEN(dev_priv) <= 10)
				freq = gen10_get_crystal_clock_freq(dev_priv,
								rpm_config_reg);
			else
				freq = gen11_get_crystal_clock_freq(dev_priv,
								rpm_config_reg);

			/* Now figure out how the command stream's timestamp
			 * register increments from this frequency (it might
			 * increment only every few clock cycle).
			 */
			freq >>= 3 - ((rpm_config_reg &
				       GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
				      GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
		}

		return freq;
	}

	MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n");
	return 0;
}

#undef INTEL_VGA_DEVICE
#define INTEL_VGA_DEVICE(id, info) (id)

static const u16 subplatform_ult_ids[] = {
	INTEL_HSW_ULT_GT1_IDS(0),
	INTEL_HSW_ULT_GT2_IDS(0),
	INTEL_HSW_ULT_GT3_IDS(0),
	INTEL_BDW_ULT_GT1_IDS(0),
	INTEL_BDW_ULT_GT2_IDS(0),
	INTEL_BDW_ULT_GT3_IDS(0),
	INTEL_BDW_ULT_RSVD_IDS(0),
	INTEL_SKL_ULT_GT1_IDS(0),
	INTEL_SKL_ULT_GT2_IDS(0),
	INTEL_SKL_ULT_GT3_IDS(0),
	INTEL_KBL_ULT_GT1_IDS(0),
	INTEL_KBL_ULT_GT2_IDS(0),
	INTEL_KBL_ULT_GT3_IDS(0),
	INTEL_CFL_U_GT2_IDS(0),
	INTEL_CFL_U_GT3_IDS(0),
	INTEL_WHL_U_GT1_IDS(0),
	INTEL_WHL_U_GT2_IDS(0),
	INTEL_WHL_U_GT3_IDS(0),
};

static const u16 subplatform_ulx_ids[] = {
	INTEL_HSW_ULX_GT1_IDS(0),
	INTEL_HSW_ULX_GT2_IDS(0),
	INTEL_BDW_ULX_GT1_IDS(0),
	INTEL_BDW_ULX_GT2_IDS(0),
	INTEL_BDW_ULX_GT3_IDS(0),
	INTEL_BDW_ULX_RSVD_IDS(0),
	INTEL_SKL_ULX_GT1_IDS(0),
	INTEL_SKL_ULX_GT2_IDS(0),
	INTEL_KBL_ULX_GT1_IDS(0),
	INTEL_KBL_ULX_GT2_IDS(0),
	INTEL_AML_KBL_GT2_IDS(0),
	INTEL_AML_CFL_GT2_IDS(0),
};

static const u16 subplatform_portf_ids[] = {
	INTEL_CNL_PORT_F_IDS(0),
	INTEL_ICL_PORT_F_IDS(0),
};

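/* Linear scan of one of the (small) device ID tables above. */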
static bool find_devid(u16 id, const u16 *p, unsigned int num)
{
	for (; num; num--, p++) {
		if (*p == id)
			return true;
	}

	return false;
}

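/*
 * Set up the platform and subplatform masks consumed by the
 * IS_<platform>() and IS_<subplatform>() macros; the subplatform
 * (ULT/ULX/Port F) is keyed off the PCI device ID.
 */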
void intel_device_info_subplatform_init(struct drm_i915_private *i915)
{
	const struct intel_device_info *info = INTEL_INFO(i915);
	const struct intel_runtime_info *rinfo = RUNTIME_INFO(i915);
	const unsigned int pi = __platform_mask_index(rinfo, info->platform);
	const unsigned int pb = __platform_mask_bit(rinfo, info->platform);
	u16 devid = INTEL_DEVID(i915);
	u32 mask = 0;

	/* Make sure IS_<platform> checks are working. */
	RUNTIME_INFO(i915)->platform_mask[pi] = BIT(pb);

	/* Find and mark subplatform bits based on the PCI device id. */
	if (find_devid(devid, subplatform_ult_ids,
		       ARRAY_SIZE(subplatform_ult_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_ULT);
	} else if (find_devid(devid, subplatform_ulx_ids,
			      ARRAY_SIZE(subplatform_ulx_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_ULX);
		if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
			/* ULX machines are also considered ULT. */
			mask |= BIT(INTEL_SUBPLATFORM_ULT);
		}
	} else if (find_devid(devid, subplatform_portf_ids,
			      ARRAY_SIZE(subplatform_portf_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_PORTF);
	}

	GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_BITS);

	RUNTIME_INFO(i915)->platform_mask[pi] |= mask;
}

/**
 * intel_device_info_runtime_init - initialize runtime info
 * @dev_priv: the i915 device
 *
 * Determine various intel_device_info fields at runtime.
 *
 * Use it when either:
 *   - it's judged too laborious to fill n static structures with the limit
 *     when a simple if statement does the job,
 *   - run-time checks (e.g. reading fuse/strap registers) are needed.
 *
 * This function needs to be called:
 *   - after the MMIO has been set up, as we are reading registers,
 *   - after the PCH has been detected,
 *   - before the first usage of the fields it can tweak.
 */
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv);
	enum pipe pipe;

	if (INTEL_GEN(dev_priv) >= 10) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_scalers[pipe] = 2;
	} else if (IS_GEN(dev_priv, 9)) {
		runtime->num_scalers[PIPE_A] = 2;
		runtime->num_scalers[PIPE_B] = 2;
		runtime->num_scalers[PIPE_C] = 1;
	}

	BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES);

	if (INTEL_GEN(dev_priv) >= 11)
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 6;
	else if (IS_GEN(dev_priv, 10) || IS_GEMINILAKE(dev_priv))
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 3;
	else if (IS_BROXTON(dev_priv)) {
		/*
		 * Skylake and Broxton currently don't expose the topmost plane as its
		 * use is exclusive with the legacy cursor and we only want to expose
		 * one of those, not both. Until we can safely expose the topmost plane
		 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
		 * we don't expose the topmost plane at all to prevent ABI breakage
		 * down the line.
		 */

		runtime->num_sprites[PIPE_A] = 2;
		runtime->num_sprites[PIPE_B] = 2;
		runtime->num_sprites[PIPE_C] = 1;
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 2;
	} else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 1;
	}

	if (i915_modparams.disable_display) {
		DRM_INFO("Display disabled (module parameter)\n");
		info->num_pipes = 0;
	} else if (HAS_DISPLAY(dev_priv) &&
		   (IS_GEN_RANGE(dev_priv, 7, 8)) &&
		   HAS_PCH_SPLIT(dev_priv)) {
		u32 fuse_strap = I915_READ(FUSE_STRAP);
		u32 sfuse_strap = I915_READ(SFUSE_STRAP);

		/*
		 * SFUSE_STRAP is supposed to have a bit signalling the display
		 * is fused off. Unfortunately it seems that, at least in
		 * certain cases, fused off display means that PCH display
		 * reads don't land anywhere. In that case, we read 0s.
		 *
		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
		 * should be set when taking over after the firmware.
		 */
		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
		    (HAS_PCH_CPT(dev_priv) &&
		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
			DRM_INFO("Display fused off, disabling\n");
			info->num_pipes = 0;
		} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
			DRM_INFO("PipeC fused off\n");
			info->num_pipes -= 1;
		}
	} else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
		u32 dfsm = I915_READ(SKL_DFSM);
		u8 enabled_mask = BIT(info->num_pipes) - 1;

		if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
			enabled_mask &= ~BIT(PIPE_A);
		if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
			enabled_mask &= ~BIT(PIPE_B);
		if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
			enabled_mask &= ~BIT(PIPE_C);
		if (INTEL_GEN(dev_priv) >= 12 &&
		    (dfsm & TGL_DFSM_PIPE_D_DISABLE))
			enabled_mask &= ~BIT(PIPE_D);

		/*
		 * At least one pipe should be enabled and if there are
		 * disabled pipes, they should be the last ones, with no holes
		 * in the mask.
		 */
		if (enabled_mask == 0 || !is_power_of_2(enabled_mask + 1))
			DRM_ERROR("invalid pipe fuse configuration: enabled_mask=0x%x\n",
				  enabled_mask);
		else
			info->num_pipes = hweight8(enabled_mask);
	}

	/* Initialize slice/subslice/EU info */
	if (IS_HASWELL(dev_priv))
		haswell_sseu_info_init(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		cherryview_sseu_info_init(dev_priv);
	else if (IS_BROADWELL(dev_priv))
		broadwell_sseu_info_init(dev_priv);
	else if (IS_GEN(dev_priv, 9))
		gen9_sseu_info_init(dev_priv);
	else if (IS_GEN(dev_priv, 10))
		gen10_sseu_info_init(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 11)
		gen11_sseu_info_init(dev_priv);

	if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
		DRM_INFO("Disabling ppGTT for VT-d support\n");
		info->ppgtt_type = INTEL_PPGTT_NONE;
	}

	/* Initialize command stream timestamp frequency */
	runtime->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
}

void intel_driver_caps_print(const struct intel_driver_caps *caps,
			     struct drm_printer *p)
{
	drm_printf(p, "Has logical contexts? %s\n",
		   yesno(caps->has_logical_contexts));
	drm_printf(p, "scheduler: %x\n", caps->scheduler);
}

/*
 * Determine which engines are fused off in our particular hardware. Since the
 * fuse register is in the blitter powerwell, we need forcewake to be ready at
 * this point (but later we need to prune the forcewake domains for engines that
 * are indeed fused off).
 */
void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	unsigned int logical_vdbox = 0;
	unsigned int i;
	u32 media_fuse;
	u16 vdbox_mask;
	u16 vebox_mask;

	if (INTEL_GEN(dev_priv) < 11)
		return;

	media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);

	vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
	vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
		      GEN11_GT_VEBOX_DISABLE_SHIFT;

	for (i = 0; i < I915_MAX_VCS; i++) {
		if (!HAS_ENGINE(dev_priv, _VCS(i)))
			continue;

		if (!(BIT(i) & vdbox_mask)) {
			info->engine_mask &= ~BIT(_VCS(i));
			DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
			continue;
		}

		/*
		 * In Gen11, only even numbered logical VDBOXes are
		 * hooked up to an SFC (Scaler & Format Converter) unit.
		 * In TGL each VDBOX has access to an SFC.
		 */
		if (IS_TIGERLAKE(dev_priv) || logical_vdbox++ % 2 == 0)
			RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
	}
	DRM_DEBUG_DRIVER("vdbox enable: %04x, instances: %04lx\n",
			 vdbox_mask, VDBOX_MASK(dev_priv));
	GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv));

	for (i = 0; i < I915_MAX_VECS; i++) {
		if (!HAS_ENGINE(dev_priv, _VECS(i)))
			continue;

		if (!(BIT(i) & vebox_mask)) {
			info->engine_mask &= ~BIT(_VECS(i));
			DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
		}
	}
	DRM_DEBUG_DRIVER("vebox enable: %04x, instances: %04lx\n",
			 vebox_mask, VEBOX_MASK(dev_priv));
	GEM_BUG_ON(vebox_mask != VEBOX_MASK(dev_priv));
}