/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drm_print.h>

#include "intel_device_info.h"
#include "i915_drv.h"

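/*
 * PLATFORM_NAME(I830), for example, expands to the designated initializer
 * [INTEL_I830] = "I830", which keeps this table in sync with
 * enum intel_platform by construction.
 */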
#define PLATFORM_NAME(x) [INTEL_##x] = #x
static const char * const platform_names[] = {
	PLATFORM_NAME(I830),
	PLATFORM_NAME(I845G),
	PLATFORM_NAME(I85X),
	PLATFORM_NAME(I865G),
	PLATFORM_NAME(I915G),
	PLATFORM_NAME(I915GM),
	PLATFORM_NAME(I945G),
	PLATFORM_NAME(I945GM),
	PLATFORM_NAME(G33),
	PLATFORM_NAME(PINEVIEW),
	PLATFORM_NAME(I965G),
	PLATFORM_NAME(I965GM),
	PLATFORM_NAME(G45),
	PLATFORM_NAME(GM45),
	PLATFORM_NAME(IRONLAKE),
	PLATFORM_NAME(SANDYBRIDGE),
	PLATFORM_NAME(IVYBRIDGE),
	PLATFORM_NAME(VALLEYVIEW),
	PLATFORM_NAME(HASWELL),
	PLATFORM_NAME(BROADWELL),
	PLATFORM_NAME(CHERRYVIEW),
	PLATFORM_NAME(SKYLAKE),
	PLATFORM_NAME(BROXTON),
	PLATFORM_NAME(KABYLAKE),
	PLATFORM_NAME(GEMINILAKE),
	PLATFORM_NAME(COFFEELAKE),
	PLATFORM_NAME(CANNONLAKE),
	PLATFORM_NAME(ICELAKE),
	PLATFORM_NAME(ELKHARTLAKE),
};
#undef PLATFORM_NAME

const char *intel_platform_name(enum intel_platform platform)
{
	BUILD_BUG_ON(ARRAY_SIZE(platform_names) != INTEL_MAX_PLATFORMS);

	if (WARN_ON_ONCE(platform >= ARRAY_SIZE(platform_names) ||
			 platform_names[platform] == NULL))
		return "<unknown>";

	return platform_names[platform];
}

void intel_device_info_dump_flags(const struct intel_device_info *info,
				  struct drm_printer *p)
{
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG

#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->display.name));
	DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
}

static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
{
	int s;

	drm_printf(p, "slice total: %u, mask=%04x\n",
		   hweight8(sseu->slice_mask), sseu->slice_mask);
	drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu));
	for (s = 0; s < sseu->max_slices; s++) {
		drm_printf(p, "slice%d: %u subslices, mask=%08x\n",
			   s, intel_sseu_subslices_per_slice(sseu, s),
			   intel_sseu_get_subslices(sseu, s));
	}
	drm_printf(p, "EU total: %u\n", sseu->eu_total);
	drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
	drm_printf(p, "has slice power gating: %s\n",
		   yesno(sseu->has_slice_pg));
	drm_printf(p, "has subslice power gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
}

void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
				    struct drm_printer *p)
{
	sseu_dump(&info->sseu, p);

	drm_printf(p, "CS timestamp frequency: %u kHz\n",
		   info->cs_timestamp_frequency_khz);
}

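/*
 * The EU masks are kept in a flat byte array: each subslice owns eu_stride
 * consecutive bytes and each slice owns max_subslices such entries. As an
 * illustration (assuming eu_stride == 1 and max_subslices == 4), the mask
 * for slice 1, subslice 2 starts at eu_mask[1 * 4 + 2].
 */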
static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice,
		       int subslice)
{
	int slice_stride = sseu->max_subslices * sseu->eu_stride;

	return slice * slice_stride + subslice * sseu->eu_stride;
}

static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
			int subslice)
{
	int i, offset = sseu_eu_idx(sseu, slice, subslice);
	u16 eu_mask = 0;

	for (i = 0; i < sseu->eu_stride; i++) {
		eu_mask |= ((u16)sseu->eu_mask[offset + i]) <<
			(i * BITS_PER_BYTE);
	}

	return eu_mask;
}

static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
			 u16 eu_mask)
{
	int i, offset = sseu_eu_idx(sseu, slice, subslice);

	for (i = 0; i < sseu->eu_stride; i++) {
		sseu->eu_mask[offset + i] =
			(eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
	}
}

void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
				     struct drm_printer *p)
{
	int s, ss;

	if (sseu->max_slices == 0) {
		drm_printf(p, "Unavailable\n");
		return;
	}

	for (s = 0; s < sseu->max_slices; s++) {
		drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n",
			   s, intel_sseu_subslices_per_slice(sseu, s),
			   intel_sseu_get_subslices(sseu, s));

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			u16 enabled_eus = sseu_get_eus(sseu, s, ss);

			drm_printf(p, "\tsubslice%d: %u EUs (0x%hx)\n",
				   ss, hweight16(enabled_eus), enabled_eus);
		}
	}
}

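/*
 * Because eu_mask is stored byte by byte, the EU total is simply the
 * popcount of the whole array: e.g. two subslices with masks 0xff and
 * 0x7f contribute 8 + 7 = 15 EUs.
 */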
static u16 compute_eu_total(const struct sseu_dev_info *sseu)
{
	u16 i, total = 0;

	for (i = 0; i < ARRAY_SIZE(sseu->eu_mask); i++)
		total += hweight8(sseu->eu_mask[i]);

	return total;
}

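/*
 * Gen11 reports slice/subslice/EU availability through dedicated GT fuse
 * registers; the disable registers are inverted below so that a set bit
 * consistently means "enabled".
 */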
static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u8 s_en;
	u32 ss_en, ss_en_mask;
	u8 eu_en;
	int s;

	if (IS_ELKHARTLAKE(dev_priv))
		intel_sseu_set_info(sseu, 1, 4, 8);
	else
		intel_sseu_set_info(sseu, 1, 8, 8);

	s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
	ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE);
	ss_en_mask = BIT(sseu->max_subslices) - 1;
	eu_en = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK);

	for (s = 0; s < sseu->max_slices; s++) {
		if (s_en & BIT(s)) {
			int ss_idx = sseu->max_subslices * s;
			int ss;

			sseu->slice_mask |= BIT(s);

			intel_sseu_set_subslices(sseu, s, (ss_en >> ss_idx) &
						 ss_en_mask);

			for (ss = 0; ss < sseu->max_subslices; ss++)
				if (intel_sseu_has_subslice(sseu, s, ss))
					sseu_set_eus(sseu, s, ss, eu_en);
		}
	}
	sseu->eu_per_subslice = hweight8(eu_en);
	sseu->eu_total = compute_eu_total(sseu);

	/* ICL has no power gating restrictions. */
	sseu->has_slice_pg = 1;
	sseu->has_subslice_pg = 1;
	sseu->has_eu_pg = 1;
}

static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	const u32 fuse2 = I915_READ(GEN8_FUSE2);
	int s, ss;
	const int eu_mask = 0xff;
	u32 subslice_mask, eu_en;

	intel_sseu_set_info(sseu, 6, 4, 8);

	sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >>
			    GEN10_F2_S_ENA_SHIFT;

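	/*
	 * The per-subslice EU disable masks are packed one byte at a time
	 * across GEN8_EU_DISABLE0..2 and GEN10_EU_DISABLE3, so a slice's
	 * masks can straddle a register boundary (e.g. slice1 takes the
	 * top byte of DISABLE0 and the bottom byte of DISABLE1).
	 */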
	/* Slice0 */
	eu_en = ~I915_READ(GEN8_EU_DISABLE0);
	for (ss = 0; ss < sseu->max_subslices; ss++)
		sseu_set_eus(sseu, 0, ss, (eu_en >> (8 * ss)) & eu_mask);
	/* Slice1 */
	sseu_set_eus(sseu, 1, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN8_EU_DISABLE1);
	sseu_set_eus(sseu, 1, 1, eu_en & eu_mask);
	/* Slice2 */
	sseu_set_eus(sseu, 2, 0, (eu_en >> 8) & eu_mask);
	sseu_set_eus(sseu, 2, 1, (eu_en >> 16) & eu_mask);
	/* Slice3 */
	sseu_set_eus(sseu, 3, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN8_EU_DISABLE2);
	sseu_set_eus(sseu, 3, 1, eu_en & eu_mask);
	/* Slice4 */
	sseu_set_eus(sseu, 4, 0, (eu_en >> 8) & eu_mask);
	sseu_set_eus(sseu, 4, 1, (eu_en >> 16) & eu_mask);
	/* Slice5 */
	sseu_set_eus(sseu, 5, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN10_EU_DISABLE3);
	sseu_set_eus(sseu, 5, 1, eu_en & eu_mask);

	subslice_mask = (1 << 4) - 1;
	subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
			   GEN10_F2_SS_DIS_SHIFT);

	for (s = 0; s < sseu->max_slices; s++) {
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			if (sseu_get_eus(sseu, s, ss) == 0)
				subslice_mask &= ~BIT(ss);
		}

		/*
		 * Slice0 can have up to 3 subslices, but there are only 2 in
		 * slice1/2.
		 */
		intel_sseu_set_subslices(sseu, s, s == 0 ? subslice_mask :
							   subslice_mask & 0x3);
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * CNL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     intel_sseu_subslice_total(sseu)) :
				0;

	/* No restrictions on Power Gating */
	sseu->has_slice_pg = 1;
	sseu->has_subslice_pg = 1;
	sseu->has_eu_pg = 1;
}

static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u32 fuse;
	u8 subslice_mask = 0;

	fuse = I915_READ(CHV_FUSE_GT);

	sseu->slice_mask = BIT(0);
	intel_sseu_set_info(sseu, 1, 2, 8);

	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
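		/*
		 * Each subslice's 8 EU-disable bits are fused in two 4-bit
		 * halves (the R0/R1 fields); reassemble them into a single
		 * byte before inverting into an enable mask.
		 */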
		u8 disabled_mask =
			((fuse & CHV_FGT_EU_DIS_SS0_R0_MASK) >>
			 CHV_FGT_EU_DIS_SS0_R0_SHIFT) |
			(((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
			  CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);

		subslice_mask |= BIT(0);
		sseu_set_eus(sseu, 0, 0, ~disabled_mask);
	}

	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
		u8 disabled_mask =
			((fuse & CHV_FGT_EU_DIS_SS1_R0_MASK) >>
			 CHV_FGT_EU_DIS_SS1_R0_SHIFT) |
			(((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
			  CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);

		subslice_mask |= BIT(1);
		sseu_set_eus(sseu, 0, 1, ~disabled_mask);
	}

	intel_sseu_set_subslices(sseu, 0, subslice_mask);

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * CHV is expected to always have a uniform distribution of EU
	 * across subslices.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				sseu->eu_total /
					intel_sseu_subslice_total(sseu) :
				0;
	/*
	 * CHV supports subslice power gating on devices with more than
	 * one subslice, and supports EU power gating on devices with
	 * more than one EU pair per subslice.
	 */
	sseu->has_slice_pg = 0;
	sseu->has_subslice_pg = intel_sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
}

static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	int s, ss;
	u32 fuse2, eu_disable, subslice_mask;
	const u8 eu_mask = 0xff;

	fuse2 = I915_READ(GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;

	/* BXT has a single slice and at most 3 subslices. */
	intel_sseu_set_info(sseu, IS_GEN9_LP(dev_priv) ? 1 : 3,
			    IS_GEN9_LP(dev_priv) ? 3 : 4, 8);

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	subslice_mask = (1 << sseu->max_subslices) - 1;
	subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
			   GEN9_F2_SS_DIS_SHIFT);

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		intel_sseu_set_subslices(sseu, s, subslice_mask);

		eu_disable = I915_READ(GEN9_EU_DISABLE(s));
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			int eu_per_ss;
			u8 eu_disabled_mask;

			if (!intel_sseu_has_subslice(sseu, s, ss))
				/* skip disabled subslice */
				continue;

			eu_disabled_mask = (eu_disable >> (ss * 8)) & eu_mask;

			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);

			eu_per_ss = sseu->max_eus_per_subslice -
				hweight8(eu_disabled_mask);

			/*
			 * Record which subslice(s) has(have) 7 EUs. we
			 * can tune the hash used to spread work among
			 * subslices if they are unbalanced.
			 */
			if (eu_per_ss == 7)
				sseu->subslice_7eu[s] |= BIT(ss);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * SKL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery. BXT is expected to be perfectly uniform in EU
	 * distribution.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     intel_sseu_subslice_total(sseu)) :
				0;
	/*
	 * SKL+ supports slice power gating on devices with more than
	 * one slice, and supports EU power gating on devices with
	 * more than one EU pair per subslice. BXT+ supports subslice
	 * power gating on devices with more than one subslice, and
	 * supports EU power gating on devices with more than one EU
	 * pair per subslice.
	 */
	sseu->has_slice_pg =
		!IS_GEN9_LP(dev_priv) && hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg =
		IS_GEN9_LP(dev_priv) && intel_sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = sseu->eu_per_subslice > 2;

	if (IS_GEN9_LP(dev_priv)) {
#define IS_SS_DISABLED(ss)	(!(sseu->subslice_mask[0] & BIT(ss)))
		info->has_pooled_eu = hweight8(sseu->subslice_mask[0]) == 3;

		sseu->min_eu_in_pool = 0;
		if (info->has_pooled_eu) {
			if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
				sseu->min_eu_in_pool = 3;
			else if (IS_SS_DISABLED(1))
				sseu->min_eu_in_pool = 6;
			else
				sseu->min_eu_in_pool = 9;
		}
#undef IS_SS_DISABLED
	}
}

static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	int s, ss;
	u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */

	fuse2 = I915_READ(GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
	intel_sseu_set_info(sseu, 3, 3, 8);

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	subslice_mask = GENMASK(sseu->max_subslices - 1, 0);
	subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
			   GEN8_F2_SS_DIS_SHIFT);

	eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
	eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
			 (32 - GEN8_EU_DIS0_S1_SHIFT));
	eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
			 (32 - GEN8_EU_DIS1_S2_SHIFT));

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		intel_sseu_set_subslices(sseu, s, subslice_mask);

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			u8 eu_disabled_mask;
			u32 n_disabled;

			if (!intel_sseu_has_subslice(sseu, s, ss))
				/* skip disabled subslice */
				continue;

			eu_disabled_mask =
				eu_disable[s] >>
					(ss * sseu->max_eus_per_subslice);

			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);

			n_disabled = hweight8(eu_disabled_mask);

			/*
			 * Record which subslices have 7 EUs.
			 */
			if (sseu->max_eus_per_subslice - n_disabled == 7)
				sseu->subslice_7eu[s] |= 1 << ss;
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * BDW is expected to always have a uniform distribution of EU across
	 * subslices with the exception that any one EU in any one subslice may
	 * be fused off for die recovery.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     intel_sseu_subslice_total(sseu)) :
				0;

	/*
	 * BDW supports slice power gating on devices with more than
	 * one slice.
	 */
	sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg = 0;
	sseu->has_eu_pg = 0;
}

static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u32 fuse1;
	int s, ss;
	u32 subslice_mask;

	/*
	 * There isn't a register to tell us how many slices/subslices. We
	 * work off the PCI-ids here.
	 */
	switch (INTEL_INFO(dev_priv)->gt) {
	default:
		MISSING_CASE(INTEL_INFO(dev_priv)->gt);
		/* fall through */
	case 1:
		sseu->slice_mask = BIT(0);
		subslice_mask = BIT(0);
		break;
	case 2:
		sseu->slice_mask = BIT(0);
		subslice_mask = BIT(0) | BIT(1);
		break;
	case 3:
		sseu->slice_mask = BIT(0) | BIT(1);
		subslice_mask = BIT(0) | BIT(1);
		break;
	}

	fuse1 = I915_READ(HSW_PAVP_FUSE1);
	switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) {
	default:
		MISSING_CASE((fuse1 & HSW_F1_EU_DIS_MASK) >>
			     HSW_F1_EU_DIS_SHIFT);
		/* fall through */
	case HSW_F1_EU_DIS_10EUS:
		sseu->eu_per_subslice = 10;
		break;
	case HSW_F1_EU_DIS_8EUS:
		sseu->eu_per_subslice = 8;
		break;
	case HSW_F1_EU_DIS_6EUS:
		sseu->eu_per_subslice = 6;
		break;
	}

	intel_sseu_set_info(sseu, hweight8(sseu->slice_mask),
			    hweight8(subslice_mask),
			    sseu->eu_per_subslice);

	for (s = 0; s < sseu->max_slices; s++) {
		intel_sseu_set_subslices(sseu, s, subslice_mask);

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			sseu_set_eus(sseu, s, ss,
				     (1UL << sseu->eu_per_subslice) - 1);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/* No powergating for you. */
	sseu->has_slice_pg = 0;
	sseu->has_subslice_pg = 0;
	sseu->has_eu_pg = 0;
}

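/*
 * TIMESTAMP_OVERRIDE encodes the frequency (in kHz) as a base plus a
 * fraction: e.g. (illustrative values) a divider field of 18 yields
 * (18 + 1) * 1000 = 19000 kHz, and a denominator field of 4 adds
 * 1000 / (4 + 1) = 200 kHz, giving the familiar 19.2 MHz reference.
 */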
static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
{
	u32 ts_override = I915_READ(GEN9_TIMESTAMP_OVERRIDE);
	u32 base_freq, frac_freq;

	base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
	base_freq *= 1000;

	frac_freq = ((ts_override &
		      GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
	frac_freq = 1000 / (frac_freq + 1);

	return base_freq + frac_freq;
}

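/*
 * Redefine INTEL_VGA_DEVICE so that the INTEL_*_IDS() tables below expand
 * to bare PCI device ids instead of the usual id/info pairs.
 */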
static u32 gen10_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
					u32 rpm_config_reg)
{
	u32 f19_2_mhz = 19200;
	u32 f24_mhz = 24000;
	u32 crystal_clock = (rpm_config_reg &
			     GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
			    GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;

	switch (crystal_clock) {
	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
		return f19_2_mhz;
	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
		return f24_mhz;
	default:
		MISSING_CASE(crystal_clock);
		return 0;
	}
}

static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
					u32 rpm_config_reg)
{
	u32 f19_2_mhz = 19200;
	u32 f24_mhz = 24000;
	u32 f25_mhz = 25000;
	u32 f38_4_mhz = 38400;
	u32 crystal_clock = (rpm_config_reg &
			     GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
			    GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;

	switch (crystal_clock) {
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
		return f24_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
		return f19_2_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
		return f38_4_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
		return f25_mhz;
	default:
		MISSING_CASE(crystal_clock);
		return 0;
	}
}

static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
{
	u32 f12_5_mhz = 12500;
	u32 f19_2_mhz = 19200;
	u32 f24_mhz = 24000;

	if (INTEL_GEN(dev_priv) <= 4) {
		/* PRMs say:
		 *
		 *     "The value in this register increments once every 16
		 *      hclks." (through the “Clocking Configuration”
		 *      (“CLKCFG”) MCHBAR register)
		 */
		return dev_priv->rawclk_freq / 16;
	} else if (INTEL_GEN(dev_priv) <= 8) {
		/* PRMs say:
		 *
		 *     "The PCU TSC counts 10ns increments; this timestamp
		 *      reflects bits 38:3 of the TSC (i.e. 80ns granularity,
		 *      rolling over every 1.5 hours)."
		 */
		return f12_5_mhz;
	} else if (INTEL_GEN(dev_priv) <= 9) {
		u32 ctc_reg = I915_READ(CTC_MODE);
		u32 freq = 0;

		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
			freq = read_reference_ts_freq(dev_priv);
		} else {
			freq = IS_GEN9_LP(dev_priv) ? f19_2_mhz : f24_mhz;

			/* Now figure out how the command stream's timestamp
			 * register increments from this frequency (it might
			 * increment only every few clock cycle).
			 */
			freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
				      CTC_SHIFT_PARAMETER_SHIFT);
		}

		return freq;
	} else if (INTEL_GEN(dev_priv) <= 11) {
		u32 ctc_reg = I915_READ(CTC_MODE);
		u32 freq = 0;

		/* First figure out the reference frequency. There are 2 ways
		 * we can compute the frequency, either through the
		 * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
		 * tells us which one we should use.
		 */
		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
			freq = read_reference_ts_freq(dev_priv);
		} else {
			u32 rpm_config_reg = I915_READ(RPM_CONFIG0);

			if (INTEL_GEN(dev_priv) <= 10)
				freq = gen10_get_crystal_clock_freq(dev_priv,
								rpm_config_reg);
			else
				freq = gen11_get_crystal_clock_freq(dev_priv,
								rpm_config_reg);

			/* Now figure out how the command stream's timestamp
			 * register increments from this frequency (it might
			 * increment only every few clock cycle).
			 */
			freq >>= 3 - ((rpm_config_reg &
				       GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
				      GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
		}

		return freq;
	}

	MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n");
	return 0;
}

#undef INTEL_VGA_DEVICE
#define INTEL_VGA_DEVICE(id, info) (id)

static const u16 subplatform_ult_ids[] = {
	INTEL_HSW_ULT_GT1_IDS(0),
	INTEL_HSW_ULT_GT2_IDS(0),
	INTEL_HSW_ULT_GT3_IDS(0),
	INTEL_BDW_ULT_GT1_IDS(0),
	INTEL_BDW_ULT_GT2_IDS(0),
	INTEL_BDW_ULT_GT3_IDS(0),
	INTEL_BDW_ULT_RSVD_IDS(0),
	INTEL_SKL_ULT_GT1_IDS(0),
	INTEL_SKL_ULT_GT2_IDS(0),
	INTEL_SKL_ULT_GT3_IDS(0),
	INTEL_KBL_ULT_GT1_IDS(0),
	INTEL_KBL_ULT_GT2_IDS(0),
	INTEL_KBL_ULT_GT3_IDS(0),
	INTEL_CFL_U_GT2_IDS(0),
	INTEL_CFL_U_GT3_IDS(0),
	INTEL_WHL_U_GT1_IDS(0),
	INTEL_WHL_U_GT2_IDS(0),
	INTEL_WHL_U_GT3_IDS(0)
};

static const u16 subplatform_ulx_ids[] = {
	INTEL_HSW_ULX_GT1_IDS(0),
	INTEL_HSW_ULX_GT2_IDS(0),
	INTEL_BDW_ULX_GT1_IDS(0),
	INTEL_BDW_ULX_GT2_IDS(0),
	INTEL_BDW_ULX_GT3_IDS(0),
	INTEL_BDW_ULX_RSVD_IDS(0),
	INTEL_SKL_ULX_GT1_IDS(0),
	INTEL_SKL_ULX_GT2_IDS(0),
	INTEL_KBL_ULX_GT1_IDS(0),
	INTEL_KBL_ULX_GT2_IDS(0)
};

static const u16 subplatform_aml_ids[] = {
	INTEL_AML_KBL_GT2_IDS(0),
	INTEL_AML_CFL_GT2_IDS(0)
};

static const u16 subplatform_portf_ids[] = {
	INTEL_CNL_PORT_F_IDS(0),
	INTEL_ICL_PORT_F_IDS(0)
};

static bool find_devid(u16 id, const u16 *p, unsigned int num)
{
	for (; num; num--, p++) {
		if (*p == id)
			return true;
	}

	return false;
}

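/*
 * Each platform owns one bit in platform_mask[]; the subplatform bits found
 * below are OR'ed into the same word, so platform and subplatform checks
 * both reduce to cheap mask tests.
 */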
void intel_device_info_subplatform_init(struct drm_i915_private *i915)
{
	const struct intel_device_info *info = INTEL_INFO(i915);
	const struct intel_runtime_info *rinfo = RUNTIME_INFO(i915);
	const unsigned int pi = __platform_mask_index(rinfo, info->platform);
	const unsigned int pb = __platform_mask_bit(rinfo, info->platform);
	u16 devid = INTEL_DEVID(i915);
	u32 mask = 0;

	/* Make sure IS_<platform> checks are working. */
	RUNTIME_INFO(i915)->platform_mask[pi] = BIT(pb);

	/* Find and mark subplatform bits based on the PCI device id. */
	if (find_devid(devid, subplatform_ult_ids,
		       ARRAY_SIZE(subplatform_ult_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_ULT);
	} else if (find_devid(devid, subplatform_ulx_ids,
			      ARRAY_SIZE(subplatform_ulx_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_ULX);
		if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
			/* ULX machines are also considered ULT. */
			mask |= BIT(INTEL_SUBPLATFORM_ULT);
		}
	} else if (find_devid(devid, subplatform_aml_ids,
			      ARRAY_SIZE(subplatform_aml_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_AML);
	} else if (find_devid(devid, subplatform_portf_ids,
			      ARRAY_SIZE(subplatform_portf_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_PORTF);
	}

	GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_BITS);

	RUNTIME_INFO(i915)->platform_mask[pi] |= mask;
}

/**
 * intel_device_info_runtime_init - initialize runtime info
 * @dev_priv: the i915 device
 *
 * Determine various intel_device_info fields at runtime.
 *
 * Use it when either:
 *   - it's judged too laborious to fill n static structures with the limit
 *     when a simple if statement does the job,
 *   - run-time checks (e.g. read fuse/strap registers) are needed.
 *
 * This function needs to be called:
 *   - after the MMIO has been setup as we are reading registers,
 *   - after the PCH has been detected,
 *   - before the first usage of the fields it can tweak.
 */
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv);
	enum pipe pipe;

	if (INTEL_GEN(dev_priv) >= 10) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_scalers[pipe] = 2;
	} else if (IS_GEN(dev_priv, 9)) {
		runtime->num_scalers[PIPE_A] = 2;
		runtime->num_scalers[PIPE_B] = 2;
		runtime->num_scalers[PIPE_C] = 1;
	}

	BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES);

	if (INTEL_GEN(dev_priv) >= 11)
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 6;
	else if (IS_GEN(dev_priv, 10) || IS_GEMINILAKE(dev_priv))
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 3;
	else if (IS_BROXTON(dev_priv)) {
		/*
		 * Skylake and Broxton currently don't expose the topmost
		 * plane as its use is exclusive with the legacy cursor and
		 * we only want to expose one of those, not both. Until we
		 * can safely expose the topmost plane as a
		 * DRM_PLANE_TYPE_CURSOR with all the features
		 * exposed/supported, we don't expose the topmost plane at
		 * all to prevent ABI breakage down the line.
		 */

		runtime->num_sprites[PIPE_A] = 2;
		runtime->num_sprites[PIPE_B] = 2;
		runtime->num_sprites[PIPE_C] = 1;
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 2;
	} else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 1;
	}

	if (i915_modparams.disable_display) {
		DRM_INFO("Display disabled (module parameter)\n");
		info->num_pipes = 0;
	} else if (HAS_DISPLAY(dev_priv) &&
		   (IS_GEN_RANGE(dev_priv, 7, 8)) &&
		   HAS_PCH_SPLIT(dev_priv)) {
		u32 fuse_strap = I915_READ(FUSE_STRAP);
		u32 sfuse_strap = I915_READ(SFUSE_STRAP);

		/*
		 * SFUSE_STRAP is supposed to have a bit signalling the display
		 * is fused off. Unfortunately it seems that, at least in
		 * certain cases, fused off display means that PCH display
		 * reads don't land anywhere. In that case, we read 0s.
		 *
		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
		 * should be set when taking over after the firmware.
		 */
		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
		    (HAS_PCH_CPT(dev_priv) &&
		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
			DRM_INFO("Display fused off, disabling\n");
			info->num_pipes = 0;
		} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
			DRM_INFO("PipeC fused off\n");
			info->num_pipes -= 1;
		}
	} else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
		u32 dfsm = I915_READ(SKL_DFSM);
		u8 disabled_mask = 0;
		bool invalid;
		int num_bits;

		if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
			disabled_mask |= BIT(PIPE_A);
		if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
			disabled_mask |= BIT(PIPE_B);
		if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
			disabled_mask |= BIT(PIPE_C);

		num_bits = hweight8(disabled_mask);

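		/*
		 * Pipes are expected to be fused off only from the last one
		 * down (C, then B, then A); any other combination is
		 * reported as an invalid fuse configuration.
		 */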
		switch (disabled_mask) {
		case BIT(PIPE_A):
		case BIT(PIPE_B):
		case BIT(PIPE_A) | BIT(PIPE_B):
		case BIT(PIPE_A) | BIT(PIPE_C):
			invalid = true;
			break;
		default:
			invalid = false;
		}

		if (num_bits > info->num_pipes || invalid)
			DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
				  disabled_mask);
		else
			info->num_pipes -= num_bits;
	}

	/* Initialize slice/subslice/EU info */
	if (IS_HASWELL(dev_priv))
		haswell_sseu_info_init(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		cherryview_sseu_info_init(dev_priv);
	else if (IS_BROADWELL(dev_priv))
		broadwell_sseu_info_init(dev_priv);
	else if (IS_GEN(dev_priv, 9))
		gen9_sseu_info_init(dev_priv);
	else if (IS_GEN(dev_priv, 10))
		gen10_sseu_info_init(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 11)
		gen11_sseu_info_init(dev_priv);

	if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
		DRM_INFO("Disabling ppGTT for VT-d support\n");
		info->ppgtt_type = INTEL_PPGTT_NONE;
	}

	/* Initialize command stream timestamp frequency */
	runtime->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
}

void intel_driver_caps_print(const struct intel_driver_caps *caps,
			     struct drm_printer *p)
{
	drm_printf(p, "Has logical contexts? %s\n",
		   yesno(caps->has_logical_contexts));
	drm_printf(p, "scheduler: %x\n", caps->scheduler);
}

/*
 * Determine which engines are fused off in our particular hardware. Since the
 * fuse register is in the blitter powerwell, we need forcewake to be ready at
 * this point (but later we need to prune the forcewake domains for engines that
 * are indeed fused off).
 */
void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	unsigned int logical_vdbox = 0;
	unsigned int i;
	u32 media_fuse;
	u16 vdbox_mask;
	u16 vebox_mask;

	if (INTEL_GEN(dev_priv) < 11)
		return;

	media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);

	vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
	vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
		      GEN11_GT_VEBOX_DISABLE_SHIFT;

	for (i = 0; i < I915_MAX_VCS; i++) {
		if (!HAS_ENGINE(dev_priv, _VCS(i)))
			continue;

		if (!(BIT(i) & vdbox_mask)) {
			info->engine_mask &= ~BIT(_VCS(i));
			DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
			continue;
		}

		/*
		 * In Gen11, only even numbered logical VDBOXes are
		 * hooked up to an SFC (Scaler & Format Converter) unit.
		 */
		if (logical_vdbox++ % 2 == 0)
			RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
	}
	DRM_DEBUG_DRIVER("vdbox enable: %04x, instances: %04lx\n",
			 vdbox_mask, VDBOX_MASK(dev_priv));
	GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv));

	for (i = 0; i < I915_MAX_VECS; i++) {
		if (!HAS_ENGINE(dev_priv, _VECS(i)))
			continue;

		if (!(BIT(i) & vebox_mask)) {
			info->engine_mask &= ~BIT(_VECS(i));
			DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
		}
	}
	DRM_DEBUG_DRIVER("vebox enable: %04x, instances: %04lx\n",
			 vebox_mask, VEBOX_MASK(dev_priv));
	GEM_BUG_ON(vebox_mask != VEBOX_MASK(dev_priv));
}