intel_device_info.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drm_print.h>

#include "intel_device_info.h"
#include "i915_drv.h"

#define PLATFORM_NAME(x) [INTEL_##x] = #x
static const char * const platform_names[] = {
	PLATFORM_NAME(I830),
	PLATFORM_NAME(I845G),
	PLATFORM_NAME(I85X),
	PLATFORM_NAME(I865G),
	PLATFORM_NAME(I915G),
	PLATFORM_NAME(I915GM),
	PLATFORM_NAME(I945G),
	PLATFORM_NAME(I945GM),
	PLATFORM_NAME(G33),
	PLATFORM_NAME(PINEVIEW),
	PLATFORM_NAME(I965G),
	PLATFORM_NAME(I965GM),
	PLATFORM_NAME(G45),
	PLATFORM_NAME(GM45),
	PLATFORM_NAME(IRONLAKE),
	PLATFORM_NAME(SANDYBRIDGE),
	PLATFORM_NAME(IVYBRIDGE),
	PLATFORM_NAME(VALLEYVIEW),
	PLATFORM_NAME(HASWELL),
	PLATFORM_NAME(BROADWELL),
	PLATFORM_NAME(CHERRYVIEW),
	PLATFORM_NAME(SKYLAKE),
	PLATFORM_NAME(BROXTON),
	PLATFORM_NAME(KABYLAKE),
	PLATFORM_NAME(GEMINILAKE),
	PLATFORM_NAME(COFFEELAKE),
	PLATFORM_NAME(CANNONLAKE),
	PLATFORM_NAME(ICELAKE),
	PLATFORM_NAME(ELKHARTLAKE),
	PLATFORM_NAME(TIGERLAKE),
};
#undef PLATFORM_NAME

const char *intel_platform_name(enum intel_platform platform)
{
	BUILD_BUG_ON(ARRAY_SIZE(platform_names) != INTEL_MAX_PLATFORMS);

	if (WARN_ON_ONCE(platform >= ARRAY_SIZE(platform_names) ||
			 platform_names[platform] == NULL))
		return "<unknown>";

	return platform_names[platform];
}

void intel_device_info_dump_flags(const struct intel_device_info *info,
				  struct drm_printer *p)
{
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG

#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->display.name));
	DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
}

static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
{
	int s;

	drm_printf(p, "slice total: %u, mask=%04x\n",
		   hweight8(sseu->slice_mask), sseu->slice_mask);
	drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu));
	for (s = 0; s < sseu->max_slices; s++) {
		drm_printf(p, "slice%d: %u subslices, mask=%04x\n",
			   s, intel_sseu_subslices_per_slice(sseu, s),
			   sseu->subslice_mask[s]);
	}
	drm_printf(p, "EU total: %u\n", sseu->eu_total);
	drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
	drm_printf(p, "has slice power gating: %s\n",
		   yesno(sseu->has_slice_pg));
	drm_printf(p, "has subslice power gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
}

void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
				    struct drm_printer *p)
{
	sseu_dump(&info->sseu, p);

	drm_printf(p, "CS timestamp frequency: %u kHz\n",
		   info->cs_timestamp_frequency_khz);
}

static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice,
		       int subslice)
{
	int slice_stride = sseu->max_subslices * sseu->eu_stride;

	return slice * slice_stride + subslice * sseu->eu_stride;
}

static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
			int subslice)
{
	int i, offset = sseu_eu_idx(sseu, slice, subslice);
	u16 eu_mask = 0;

	for (i = 0; i < sseu->eu_stride; i++) {
		eu_mask |= ((u16)sseu->eu_mask[offset + i]) <<
			(i * BITS_PER_BYTE);
	}

	return eu_mask;
}

static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
			 u16 eu_mask)
{
	int i, offset = sseu_eu_idx(sseu, slice, subslice);

	for (i = 0; i < sseu->eu_stride; i++) {
		sseu->eu_mask[offset + i] =
			(eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
	}
}
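
/*
 * Illustration of the eu_mask packing used by sseu_get_eus() and
 * sseu_set_eus() above (values are made up): with eu_stride = 1 byte
 * per subslice and max_subslices = 4, the EUs of slice 1 / subslice 2
 * live in eu_mask[1 * 4 + 2]; sseu_set_eus(sseu, 1, 2, 0x7f) stores
 * 0x7f there and sseu_get_eus(sseu, 1, 2) reassembles the same value
 * byte by byte.
 */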

void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
				     struct drm_printer *p)
{
	int s, ss;

	if (sseu->max_slices == 0) {
		drm_printf(p, "Unavailable\n");
		return;
	}

	for (s = 0; s < sseu->max_slices; s++) {
		drm_printf(p, "slice%d: %u subslice(s) (0x%hhx):\n",
			   s, intel_sseu_subslices_per_slice(sseu, s),
			   sseu->subslice_mask[s]);

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			u16 enabled_eus = sseu_get_eus(sseu, s, ss);

			drm_printf(p, "\tsubslice%d: %u EUs (0x%hx)\n",
				   ss, hweight16(enabled_eus), enabled_eus);
		}
	}
}

static u16 compute_eu_total(const struct sseu_dev_info *sseu)
{
	u16 i, total = 0;

	for (i = 0; i < ARRAY_SIZE(sseu->eu_mask); i++)
		total += hweight8(sseu->eu_mask[i]);
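
	/*
	 * e.g. (illustrative): two packed mask bytes of 0xff and 0x7f
	 * yield a total of 8 + 7 = 15 enabled EUs.
	 */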

	return total;
}

static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u8 s_en;
	u32 ss_en, ss_en_mask;
	u8 eu_en;
	int s;

	if (IS_ELKHARTLAKE(dev_priv))
		intel_sseu_set_info(sseu, 1, 4, 8);
	else
		intel_sseu_set_info(sseu, 1, 8, 8);

	s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
	ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE);
	ss_en_mask = BIT(sseu->max_subslices) - 1;
	eu_en = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK);
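
	/*
	 * Worked example for the masks above (made-up fuse values): with
	 * max_subslices = 8, ss_en_mask = 0xff and slice s owns bits
	 * [8s+7:8s] of ss_en, so a GEN11_GT_SUBSLICE_DISABLE readout of
	 * 0xf0 yields subslice_mask[0] = 0x0f below.
	 */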

	for (s = 0; s < sseu->max_slices; s++) {
		if (s_en & BIT(s)) {
			int ss_idx = sseu->max_subslices * s;
			int ss;

			sseu->slice_mask |= BIT(s);
			sseu->subslice_mask[s] = (ss_en >> ss_idx) & ss_en_mask;
			for (ss = 0; ss < sseu->max_subslices; ss++) {
				if (sseu->subslice_mask[s] & BIT(ss))
					sseu_set_eus(sseu, s, ss, eu_en);
			}
		}
	}
	sseu->eu_per_subslice = hweight8(eu_en);
	sseu->eu_total = compute_eu_total(sseu);

	/* ICL has no power gating restrictions. */
	sseu->has_slice_pg = 1;
	sseu->has_subslice_pg = 1;
	sseu->has_eu_pg = 1;
}

static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	const u32 fuse2 = I915_READ(GEN8_FUSE2);
	int s, ss;
	const int eu_mask = 0xff;
	u32 subslice_mask, eu_en;

	intel_sseu_set_info(sseu, 6, 4, 8);

	sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >>
			    GEN10_F2_S_ENA_SHIFT;

	subslice_mask = (1 << 4) - 1;
	subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
			   GEN10_F2_SS_DIS_SHIFT);

	/*
	 * Slice0 can have up to 3 subslices, but there are only 2 in
	 * slice1/2.
	 */
	sseu->subslice_mask[0] = subslice_mask;
	for (s = 1; s < sseu->max_slices; s++)
		sseu->subslice_mask[s] = subslice_mask & 0x3;
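
	/*
	 * e.g. (illustrative): with no subslice-disable fuses set,
	 * subslice_mask = 0xf above, so slice 0 keeps all four bits while
	 * slices 1-5 are clamped to the low two (0x3) here.
	 */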

	/* Slice0 */
	eu_en = ~I915_READ(GEN8_EU_DISABLE0);
	for (ss = 0; ss < sseu->max_subslices; ss++)
		sseu_set_eus(sseu, 0, ss, (eu_en >> (8 * ss)) & eu_mask);
	/* Slice1 */
	sseu_set_eus(sseu, 1, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN8_EU_DISABLE1);
	sseu_set_eus(sseu, 1, 1, eu_en & eu_mask);
	/* Slice2 */
	sseu_set_eus(sseu, 2, 0, (eu_en >> 8) & eu_mask);
	sseu_set_eus(sseu, 2, 1, (eu_en >> 16) & eu_mask);
	/* Slice3 */
	sseu_set_eus(sseu, 3, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN8_EU_DISABLE2);
	sseu_set_eus(sseu, 3, 1, eu_en & eu_mask);
	/* Slice4 */
	sseu_set_eus(sseu, 4, 0, (eu_en >> 8) & eu_mask);
	sseu_set_eus(sseu, 4, 1, (eu_en >> 16) & eu_mask);
	/* Slice5 */
	sseu_set_eus(sseu, 5, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN10_EU_DISABLE3);
	sseu_set_eus(sseu, 5, 1, eu_en & eu_mask);

	/*
	 * Do a second pass where we mark the subslices disabled if all
	 * their EUs are off.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			if (sseu_get_eus(sseu, s, ss) == 0)
				sseu->subslice_mask[s] &= ~BIT(ss);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * CNL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     intel_sseu_subslice_total(sseu)) :
				0;

	/* No restrictions on Power Gating */
	sseu->has_slice_pg = 1;
	sseu->has_subslice_pg = 1;
	sseu->has_eu_pg = 1;
}

static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u32 fuse;

	fuse = I915_READ(CHV_FUSE_GT);

	sseu->slice_mask = BIT(0);
	intel_sseu_set_info(sseu, 1, 2, 8);

	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
		u8 disabled_mask =
			((fuse & CHV_FGT_EU_DIS_SS0_R0_MASK) >>
			 CHV_FGT_EU_DIS_SS0_R0_SHIFT) |
			(((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
			  CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);

		sseu->subslice_mask[0] |= BIT(0);
		sseu_set_eus(sseu, 0, 0, ~disabled_mask);
	}

	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
		u8 disabled_mask =
			((fuse & CHV_FGT_EU_DIS_SS1_R0_MASK) >>
			 CHV_FGT_EU_DIS_SS1_R0_SHIFT) |
			(((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
			  CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);

		sseu->subslice_mask[0] |= BIT(1);
		sseu_set_eus(sseu, 0, 1, ~disabled_mask);
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * CHV expected to always have a uniform distribution of EU
	 * across subslices.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				sseu->eu_total /
					intel_sseu_subslice_total(sseu) :
				0;
	/*
	 * CHV supports subslice power gating on devices with more than
	 * one subslice, and supports EU power gating on devices with
	 * more than one EU pair per subslice.
	 */
	sseu->has_slice_pg = 0;
	sseu->has_subslice_pg = intel_sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
}

static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	int s, ss;
	u32 fuse2, eu_disable, subslice_mask;
	const u8 eu_mask = 0xff;

	fuse2 = I915_READ(GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;

	/* BXT has a single slice and at most 3 subslices. */
	intel_sseu_set_info(sseu, IS_GEN9_LP(dev_priv) ? 1 : 3,
			    IS_GEN9_LP(dev_priv) ? 3 : 4, 8);

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	subslice_mask = (1 << sseu->max_subslices) - 1;
	subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
			   GEN9_F2_SS_DIS_SHIFT);

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		sseu->subslice_mask[s] = subslice_mask;

		eu_disable = I915_READ(GEN9_EU_DISABLE(s));
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			int eu_per_ss;
			u8 eu_disabled_mask;

			if (!(sseu->subslice_mask[s] & BIT(ss)))
				/* skip disabled subslice */
				continue;

			eu_disabled_mask = (eu_disable >> (ss * 8)) & eu_mask;

			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);

			eu_per_ss = sseu->max_eus_per_subslice -
				hweight8(eu_disabled_mask);

			/*
			 * Record which subslice(s) have 7 EUs. We
			 * can tune the hash used to spread work among
			 * subslices if they are unbalanced.
			 */
			if (eu_per_ss == 7)
				sseu->subslice_7eu[s] |= BIT(ss);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * SKL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery. BXT is expected to be perfectly uniform in EU
	 * distribution.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     intel_sseu_subslice_total(sseu)) :
				0;
	/*
	 * SKL+ supports slice power gating on devices with more than
	 * one slice, and supports EU power gating on devices with
	 * more than one EU pair per subslice. BXT+ supports subslice
	 * power gating on devices with more than one subslice, and
	 * supports EU power gating on devices with more than one EU
	 * pair per subslice.
	 */
	sseu->has_slice_pg =
		!IS_GEN9_LP(dev_priv) && hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg =
		IS_GEN9_LP(dev_priv) && intel_sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = sseu->eu_per_subslice > 2;

	if (IS_GEN9_LP(dev_priv)) {
#define IS_SS_DISABLED(ss)	(!(sseu->subslice_mask[0] & BIT(ss)))
		info->has_pooled_eu = hweight8(sseu->subslice_mask[0]) == 3;

		sseu->min_eu_in_pool = 0;
		if (info->has_pooled_eu) {
			if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
				sseu->min_eu_in_pool = 3;
			else if (IS_SS_DISABLED(1))
				sseu->min_eu_in_pool = 6;
			else
				sseu->min_eu_in_pool = 9;
		}
#undef IS_SS_DISABLED
	}
}

static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	int s, ss;
	u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */

	fuse2 = I915_READ(GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
	intel_sseu_set_info(sseu, 3, 3, 8);

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	subslice_mask = GENMASK(sseu->max_subslices - 1, 0);
	subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
			   GEN8_F2_SS_DIS_SHIFT);

	eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
	eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
			 (32 - GEN8_EU_DIS0_S1_SHIFT));
	eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
			 (32 - GEN8_EU_DIS1_S2_SHIFT));
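
	/*
	 * Illustration (no new behaviour): each slice owns 24 disable
	 * bits, so slice 1's mask straddles the top of GEN8_EU_DISABLE0
	 * and the bottom of GEN8_EU_DISABLE1; the shifts above stitch
	 * the two halves back into one contiguous per-slice value.
	 */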

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		sseu->subslice_mask[s] = subslice_mask;

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			u8 eu_disabled_mask;
			u32 n_disabled;

			if (!(sseu->subslice_mask[s] & BIT(ss)))
				/* skip disabled subslice */
				continue;

			eu_disabled_mask =
				eu_disable[s] >> (ss * sseu->max_eus_per_subslice);

			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);

			n_disabled = hweight8(eu_disabled_mask);

			/*
			 * Record which subslices have 7 EUs.
			 */
			if (sseu->max_eus_per_subslice - n_disabled == 7)
				sseu->subslice_7eu[s] |= 1 << ss;
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * BDW is expected to always have a uniform distribution of EU across
	 * subslices with the exception that any one EU in any one subslice may
	 * be fused off for die recovery.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     intel_sseu_subslice_total(sseu)) :
				0;

	/*
	 * BDW supports slice power gating on devices with more than
	 * one slice.
	 */
	sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg = 0;
	sseu->has_eu_pg = 0;
}

static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u32 fuse1;
	int s, ss;

	/*
	 * There isn't a register to tell us how many slices/subslices. We
	 * work off the PCI-ids here.
	 */
	switch (INTEL_INFO(dev_priv)->gt) {
	default:
		MISSING_CASE(INTEL_INFO(dev_priv)->gt);
		/* fall through */
	case 1:
		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] = BIT(0);
		break;
	case 2:
		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] = BIT(0) | BIT(1);
		break;
	case 3:
		sseu->slice_mask = BIT(0) | BIT(1);
		sseu->subslice_mask[0] = BIT(0) | BIT(1);
		sseu->subslice_mask[1] = BIT(0) | BIT(1);
		break;
	}

	fuse1 = I915_READ(HSW_PAVP_FUSE1);
	switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) {
	default:
		MISSING_CASE((fuse1 & HSW_F1_EU_DIS_MASK) >>
			     HSW_F1_EU_DIS_SHIFT);
		/* fall through */
	case HSW_F1_EU_DIS_10EUS:
		sseu->eu_per_subslice = 10;
		break;
	case HSW_F1_EU_DIS_8EUS:
		sseu->eu_per_subslice = 8;
		break;
	case HSW_F1_EU_DIS_6EUS:
		sseu->eu_per_subslice = 6;
		break;
	}
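
	/*
	 * e.g. (illustrative): a GT2 part whose fuse decodes to
	 * HSW_F1_EU_DIS_10EUS ends up with slice_mask = 0x1,
	 * subslice_mask[0] = 0x3 and 10 EUs per subslice, so
	 * compute_eu_total() below reports 20 EUs.
	 */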

	intel_sseu_set_info(sseu, hweight8(sseu->slice_mask),
			    hweight8(sseu->subslice_mask[0]),
			    sseu->eu_per_subslice);

	for (s = 0; s < sseu->max_slices; s++) {
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			sseu_set_eus(sseu, s, ss,
				     (1UL << sseu->eu_per_subslice) - 1);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/* No powergating for you. */
	sseu->has_slice_pg = 0;
	sseu->has_subslice_pg = 0;
	sseu->has_eu_pg = 0;
}

static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
{
	u32 ts_override = I915_READ(GEN9_TIMESTAMP_OVERRIDE);
	u32 base_freq, frac_freq;

	base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
	base_freq *= 1000;

	frac_freq = ((ts_override &
		      GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
	frac_freq = 1000 / (frac_freq + 1);

	return base_freq + frac_freq;
}
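
/*
 * Worked example for the computation above (made-up fuse values): a
 * divider field of 18 and a denominator field of 4 give
 * base_freq = 19 * 1000 = 19000 kHz and frac_freq = 1000 / 5 = 200 kHz,
 * i.e. a 19200 kHz (19.2 MHz) reference.
 */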

static u32 gen10_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
					u32 rpm_config_reg)
{
	u32 f19_2_mhz = 19200;
	u32 f24_mhz = 24000;
	u32 crystal_clock = (rpm_config_reg &
			     GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
			    GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;

	switch (crystal_clock) {
	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
		return f19_2_mhz;
	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
		return f24_mhz;
	default:
		MISSING_CASE(crystal_clock);
		return 0;
	}
}

static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
					u32 rpm_config_reg)
{
	u32 f19_2_mhz = 19200;
	u32 f24_mhz = 24000;
	u32 f25_mhz = 25000;
	u32 f38_4_mhz = 38400;
	u32 crystal_clock = (rpm_config_reg &
			     GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
			    GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;

	switch (crystal_clock) {
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
		return f24_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
		return f19_2_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
		return f38_4_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
		return f25_mhz;
	default:
		MISSING_CASE(crystal_clock);
		return 0;
	}
}

static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
{
	u32 f12_5_mhz = 12500;
	u32 f19_2_mhz = 19200;
	u32 f24_mhz = 24000;

	if (INTEL_GEN(dev_priv) <= 4) {
		/* PRMs say:
		 *
		 *     "The value in this register increments once every 16
		 *      hclks." (through the “Clocking Configuration”
		 *      (“CLKCFG”) MCHBAR register)
		 */
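
		/* e.g. (illustrative): a 100000 kHz rawclk would give
		 * 100000 / 16 = 6250 kHz below.
		 */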
		return dev_priv->rawclk_freq / 16;
	} else if (INTEL_GEN(dev_priv) <= 8) {
		/* PRMs say:
		 *
		 *     "The PCU TSC counts 10ns increments; this timestamp
		 *      reflects bits 38:3 of the TSC (i.e. 80ns granularity,
		 *      rolling over every 1.5 hours).
		 */
		return f12_5_mhz;
	} else if (INTEL_GEN(dev_priv) <= 9) {
		u32 ctc_reg = I915_READ(CTC_MODE);
		u32 freq = 0;

		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
			freq = read_reference_ts_freq(dev_priv);
		} else {
			freq = IS_GEN9_LP(dev_priv) ? f19_2_mhz : f24_mhz;

			/* Now figure out how the command stream's timestamp
			 * register increments from this frequency (it might
			 * increment only every few clock cycle).
			 */
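			/* e.g. (illustrative): a 24000 kHz reference with a
			 * shift parameter of 0 becomes 24000 >> 3 = 3000 kHz.
			 */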
			freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
				      CTC_SHIFT_PARAMETER_SHIFT);
		}

		return freq;
	} else if (INTEL_GEN(dev_priv) <= 12) {
		u32 ctc_reg = I915_READ(CTC_MODE);
		u32 freq = 0;

		/* First figure out the reference frequency. There are 2 ways
		 * we can compute the frequency, either through the
		 * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
		 * tells us which one we should use.
		 */
		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
			freq = read_reference_ts_freq(dev_priv);
		} else {
			u32 rpm_config_reg = I915_READ(RPM_CONFIG0);

			if (INTEL_GEN(dev_priv) <= 10)
				freq = gen10_get_crystal_clock_freq(dev_priv,
								rpm_config_reg);
			else
				freq = gen11_get_crystal_clock_freq(dev_priv,
								rpm_config_reg);

			/* Now figure out how the command stream's timestamp
			 * register increments from this frequency (it might
			 * increment only every few clock cycle).
			 */
			freq >>= 3 - ((rpm_config_reg &
				       GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
				      GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
		}

		return freq;
	}

	MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n");
	return 0;
}

#undef INTEL_VGA_DEVICE
#define INTEL_VGA_DEVICE(id, info) (id)

static const u16 subplatform_ult_ids[] = {
	INTEL_HSW_ULT_GT1_IDS(0),
	INTEL_HSW_ULT_GT2_IDS(0),
	INTEL_HSW_ULT_GT3_IDS(0),
	INTEL_BDW_ULT_GT1_IDS(0),
	INTEL_BDW_ULT_GT2_IDS(0),
	INTEL_BDW_ULT_GT3_IDS(0),
	INTEL_BDW_ULT_RSVD_IDS(0),
	INTEL_SKL_ULT_GT1_IDS(0),
	INTEL_SKL_ULT_GT2_IDS(0),
	INTEL_SKL_ULT_GT3_IDS(0),
	INTEL_KBL_ULT_GT1_IDS(0),
	INTEL_KBL_ULT_GT2_IDS(0),
	INTEL_KBL_ULT_GT3_IDS(0),
	INTEL_CFL_U_GT2_IDS(0),
	INTEL_CFL_U_GT3_IDS(0),
	INTEL_WHL_U_GT1_IDS(0),
	INTEL_WHL_U_GT2_IDS(0),
	INTEL_WHL_U_GT3_IDS(0),
};

static const u16 subplatform_ulx_ids[] = {
	INTEL_HSW_ULX_GT1_IDS(0),
	INTEL_HSW_ULX_GT2_IDS(0),
	INTEL_BDW_ULX_GT1_IDS(0),
	INTEL_BDW_ULX_GT2_IDS(0),
	INTEL_BDW_ULX_GT3_IDS(0),
	INTEL_BDW_ULX_RSVD_IDS(0),
	INTEL_SKL_ULX_GT1_IDS(0),
	INTEL_SKL_ULX_GT2_IDS(0),
	INTEL_KBL_ULX_GT1_IDS(0),
	INTEL_KBL_ULX_GT2_IDS(0),
	INTEL_AML_KBL_GT2_IDS(0),
	INTEL_AML_CFL_GT2_IDS(0),
};

static const u16 subplatform_portf_ids[] = {
	INTEL_CNL_PORT_F_IDS(0),
	INTEL_ICL_PORT_F_IDS(0),
};

static bool find_devid(u16 id, const u16 *p, unsigned int num)
{
	for (; num; num--, p++) {
		if (*p == id)
			return true;
	}

	return false;
}
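
/*
 * e.g. (illustrative): a PCI id that appears in subplatform_ult_ids[]
 * makes intel_device_info_subplatform_init() below OR
 * BIT(INTEL_SUBPLATFORM_ULT) into the platform mask, which is what the
 * IS_<platform>_ULT() style checks test.
 */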

void intel_device_info_subplatform_init(struct drm_i915_private *i915)
{
	const struct intel_device_info *info = INTEL_INFO(i915);
	const struct intel_runtime_info *rinfo = RUNTIME_INFO(i915);
	const unsigned int pi = __platform_mask_index(rinfo, info->platform);
	const unsigned int pb = __platform_mask_bit(rinfo, info->platform);
	u16 devid = INTEL_DEVID(i915);
	u32 mask = 0;

	/* Make sure IS_<platform> checks are working. */
	RUNTIME_INFO(i915)->platform_mask[pi] = BIT(pb);

	/* Find and mark subplatform bits based on the PCI device id. */
	if (find_devid(devid, subplatform_ult_ids,
		       ARRAY_SIZE(subplatform_ult_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_ULT);
	} else if (find_devid(devid, subplatform_ulx_ids,
			      ARRAY_SIZE(subplatform_ulx_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_ULX);
		if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
			/* ULX machines are also considered ULT. */
			mask |= BIT(INTEL_SUBPLATFORM_ULT);
		}
	} else if (find_devid(devid, subplatform_portf_ids,
			      ARRAY_SIZE(subplatform_portf_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_PORTF);
	}

	GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_BITS);

	RUNTIME_INFO(i915)->platform_mask[pi] |= mask;
}

/**
 * intel_device_info_runtime_init - initialize runtime info
 * @dev_priv: the i915 device
 *
 * Determine various intel_device_info fields at runtime.
 *
 * Use it when either:
 *   - it's judged too laborious to fill n static structures with the limit
 *     when a simple if statement does the job,
 *   - run-time checks (e.g. read fuse/strap registers) are needed.
 *
 * This function needs to be called:
 *   - after the MMIO has been setup as we are reading registers,
 *   - after the PCH has been detected,
 *   - before the first usage of the fields it can tweak.
 */
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv);
	enum pipe pipe;

	if (INTEL_GEN(dev_priv) >= 10) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_scalers[pipe] = 2;
	} else if (IS_GEN(dev_priv, 9)) {
		runtime->num_scalers[PIPE_A] = 2;
		runtime->num_scalers[PIPE_B] = 2;
		runtime->num_scalers[PIPE_C] = 1;
	}

	BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES);

	if (INTEL_GEN(dev_priv) >= 11)
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 6;
	else if (IS_GEN(dev_priv, 10) || IS_GEMINILAKE(dev_priv))
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 3;
	else if (IS_BROXTON(dev_priv)) {
		/*
		 * Skylake and Broxton currently don't expose the topmost plane as its
		 * use is exclusive with the legacy cursor and we only want to expose
		 * one of those, not both. Until we can safely expose the topmost plane
		 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
		 * we don't expose the topmost plane at all to prevent ABI breakage
		 * down the line.
		 */

		runtime->num_sprites[PIPE_A] = 2;
		runtime->num_sprites[PIPE_B] = 2;
		runtime->num_sprites[PIPE_C] = 1;
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 2;
	} else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 1;
	}

	if (i915_modparams.disable_display) {
		DRM_INFO("Display disabled (module parameter)\n");
		info->num_pipes = 0;
	} else if (HAS_DISPLAY(dev_priv) &&
		   (IS_GEN_RANGE(dev_priv, 7, 8)) &&
		   HAS_PCH_SPLIT(dev_priv)) {
		u32 fuse_strap = I915_READ(FUSE_STRAP);
		u32 sfuse_strap = I915_READ(SFUSE_STRAP);

		/*
		 * SFUSE_STRAP is supposed to have a bit signalling the display
		 * is fused off. Unfortunately it seems that, at least in
		 * certain cases, fused off display means that PCH display
		 * reads don't land anywhere. In that case, we read 0s.
		 *
		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
		 * should be set when taking over after the firmware.
		 */
		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
		    (HAS_PCH_CPT(dev_priv) &&
		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
			DRM_INFO("Display fused off, disabling\n");
			info->num_pipes = 0;
		} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
			DRM_INFO("PipeC fused off\n");
			info->num_pipes -= 1;
		}
	} else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
		u32 dfsm = I915_READ(SKL_DFSM);
		u8 enabled_mask = BIT(info->num_pipes) - 1;

		if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
			enabled_mask &= ~BIT(PIPE_A);
		if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
			enabled_mask &= ~BIT(PIPE_B);
		if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
			enabled_mask &= ~BIT(PIPE_C);
		if (INTEL_GEN(dev_priv) >= 12 &&
		    (dfsm & TGL_DFSM_PIPE_D_DISABLE))
			enabled_mask &= ~BIT(PIPE_D);

		/*
		 * At least one pipe should be enabled and if there are
		 * disabled pipes, they should be the last ones, with no holes
		 * in the mask.
		 */
		if (enabled_mask == 0 || !is_power_of_2(enabled_mask + 1))
			DRM_ERROR("invalid pipe fuse configuration: enabled_mask=0x%x\n",
				  enabled_mask);
		else
			info->num_pipes = hweight8(enabled_mask);
	}
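
	/*
	 * e.g. (illustrative): enabled_mask = 0x7 (pipes A-C alive) passes
	 * the contiguity check above because 0x7 + 1 is a power of two,
	 * while a mask with a hole such as 0x5 is reported as invalid.
	 */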

	/* Initialize slice/subslice/EU info */
	if (IS_HASWELL(dev_priv))
		haswell_sseu_info_init(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		cherryview_sseu_info_init(dev_priv);
	else if (IS_BROADWELL(dev_priv))
		broadwell_sseu_info_init(dev_priv);
	else if (IS_GEN(dev_priv, 9))
		gen9_sseu_info_init(dev_priv);
	else if (IS_GEN(dev_priv, 10))
		gen10_sseu_info_init(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 11)
		gen11_sseu_info_init(dev_priv);

	if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
		DRM_INFO("Disabling ppGTT for VT-d support\n");
		info->ppgtt_type = INTEL_PPGTT_NONE;
	}

	/* Initialize command stream timestamp frequency */
	runtime->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
}

void intel_driver_caps_print(const struct intel_driver_caps *caps,
			     struct drm_printer *p)
{
	drm_printf(p, "Has logical contexts? %s\n",
		   yesno(caps->has_logical_contexts));
	drm_printf(p, "scheduler: %x\n", caps->scheduler);
}

/*
 * Determine which engines are fused off in our particular hardware. Since the
 * fuse register is in the blitter powerwell, we need forcewake to be ready at
 * this point (but later we need to prune the forcewake domains for engines that
 * are indeed fused off).
 */
void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	unsigned int logical_vdbox = 0;
	unsigned int i;
	u32 media_fuse;
	u16 vdbox_mask;
	u16 vebox_mask;

	if (INTEL_GEN(dev_priv) < 11)
		return;

	media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);

	vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
	vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
		      GEN11_GT_VEBOX_DISABLE_SHIFT;

	for (i = 0; i < I915_MAX_VCS; i++) {
		if (!HAS_ENGINE(dev_priv, _VCS(i)))
			continue;

		if (!(BIT(i) & vdbox_mask)) {
			info->engine_mask &= ~BIT(_VCS(i));
			DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
			continue;
		}

		/*
		 * In Gen11, only even numbered logical VDBOXes are
		 * hooked up to an SFC (Scaler & Format Converter) unit.
		 * In TGL each VDBOX has access to an SFC.
		 */
		if (IS_TIGERLAKE(dev_priv) || logical_vdbox++ % 2 == 0)
			RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
	}
	DRM_DEBUG_DRIVER("vdbox enable: %04x, instances: %04lx\n",
			 vdbox_mask, VDBOX_MASK(dev_priv));
	GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv));
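
	/*
	 * e.g. (illustrative): on a Gen11 part with vdbox_mask = 0x5
	 * (vcs0 and vcs2 alive), the loop above counts them as logical
	 * vdboxes 0 and 1, so only vcs0 (logical 0, even) gets SFC access.
	 */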

	for (i = 0; i < I915_MAX_VECS; i++) {
		if (!HAS_ENGINE(dev_priv, _VECS(i)))
			continue;

		if (!(BIT(i) & vebox_mask)) {
			info->engine_mask &= ~BIT(_VECS(i));
			DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
		}
	}
	DRM_DEBUG_DRIVER("vebox enable: %04x, instances: %04lx\n",
			 vebox_mask, VEBOX_MASK(dev_priv));
	GEM_BUG_ON(vebox_mask != VEBOX_MASK(dev_priv));
}