/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drm_print.h>

#include "intel_device_info.h"
#include "i915_drv.h"

#define PLATFORM_NAME(x) [INTEL_##x] = #x
static const char * const platform_names[] = {
	PLATFORM_NAME(I830),
	PLATFORM_NAME(I845G),
	PLATFORM_NAME(I85X),
	PLATFORM_NAME(I865G),
	PLATFORM_NAME(I915G),
	PLATFORM_NAME(I915GM),
	PLATFORM_NAME(I945G),
	PLATFORM_NAME(I945GM),
	PLATFORM_NAME(G33),
	PLATFORM_NAME(PINEVIEW),
	PLATFORM_NAME(I965G),
	PLATFORM_NAME(I965GM),
	PLATFORM_NAME(G45),
	PLATFORM_NAME(GM45),
	PLATFORM_NAME(IRONLAKE),
	PLATFORM_NAME(SANDYBRIDGE),
	PLATFORM_NAME(IVYBRIDGE),
	PLATFORM_NAME(VALLEYVIEW),
	PLATFORM_NAME(HASWELL),
	PLATFORM_NAME(BROADWELL),
	PLATFORM_NAME(CHERRYVIEW),
	PLATFORM_NAME(SKYLAKE),
	PLATFORM_NAME(BROXTON),
	PLATFORM_NAME(KABYLAKE),
	PLATFORM_NAME(GEMINILAKE),
	PLATFORM_NAME(COFFEELAKE),
	PLATFORM_NAME(CANNONLAKE),
	PLATFORM_NAME(ICELAKE),
	PLATFORM_NAME(ELKHARTLAKE),
};
#undef PLATFORM_NAME

const char *intel_platform_name(enum intel_platform platform)
{
	BUILD_BUG_ON(ARRAY_SIZE(platform_names) != INTEL_MAX_PLATFORMS);

	if (WARN_ON_ONCE(platform >= ARRAY_SIZE(platform_names) ||
			 platform_names[platform] == NULL))
		return "<unknown>";

	return platform_names[platform];
}

void intel_device_info_dump_flags(const struct intel_device_info *info,
				  struct drm_printer *p)
{
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG

#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->display.name));
	DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
}

static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
{
	int s;

	drm_printf(p, "slice total: %u, mask=%04x\n",
		   hweight8(sseu->slice_mask), sseu->slice_mask);
	drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu));
	for (s = 0; s < sseu->max_slices; s++) {
		drm_printf(p, "slice%d: %u subslices, mask=%04x\n",
			   s, intel_sseu_subslices_per_slice(sseu, s),
			   sseu->subslice_mask[s]);
	}
	drm_printf(p, "EU total: %u\n", sseu->eu_total);
	drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
	drm_printf(p, "has slice power gating: %s\n",
		   yesno(sseu->has_slice_pg));
	drm_printf(p, "has subslice power gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
}

void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
				    struct drm_printer *p)
{
	sseu_dump(&info->sseu, p);

	drm_printf(p, "CS timestamp frequency: %u kHz\n",
		   info->cs_timestamp_frequency_khz);
}

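/*
 * Per-subslice EU masks are packed into the flat sseu->eu_mask[] byte
 * array: each (slice, subslice) pair owns GEN_SSEU_STRIDE() consecutive
 * bytes, indexed first by slice and then by subslice. sseu_eu_idx()
 * returns the byte offset of a given pair; the get/set helpers below
 * assemble and split a subslice's 16-bit mask one byte at a time.
 */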
static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice,
		       int subslice)
{
	int subslice_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
	int slice_stride = sseu->max_subslices * subslice_stride;

	return slice * slice_stride + subslice * subslice_stride;
}

static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
			int subslice)
{
	int i, offset = sseu_eu_idx(sseu, slice, subslice);
	u16 eu_mask = 0;

	for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) {
		eu_mask |= ((u16)sseu->eu_mask[offset + i]) <<
			(i * BITS_PER_BYTE);
	}

	return eu_mask;
}

static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
			 u16 eu_mask)
{
	int i, offset = sseu_eu_idx(sseu, slice, subslice);

	for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) {
		sseu->eu_mask[offset + i] =
			(eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
	}
}

void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
				     struct drm_printer *p)
{
	int s, ss;

	if (sseu->max_slices == 0) {
		drm_printf(p, "Unavailable\n");
		return;
	}

	for (s = 0; s < sseu->max_slices; s++) {
		drm_printf(p, "slice%d: %u subslice(s) (0x%hhx):\n",
			   s, intel_sseu_subslices_per_slice(sseu, s),
			   sseu->subslice_mask[s]);

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			u16 enabled_eus = sseu_get_eus(sseu, s, ss);

			drm_printf(p, "\tsubslice%d: %u EUs (0x%hx)\n",
				   ss, hweight16(enabled_eus), enabled_eus);
		}
	}
}

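/* Total enabled EUs: sum of the set bits in every byte of the EU mask. */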
static u16 compute_eu_total(const struct sseu_dev_info *sseu)
{
	u16 i, total = 0;

	for (i = 0; i < ARRAY_SIZE(sseu->eu_mask); i++)
		total += hweight8(sseu->eu_mask[i]);

	return total;
}

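/*
 * Note the gen11 fuse polarity: GEN11_GT_SLICE_ENABLE is an enable mask,
 * while the subslice and EU registers are disable masks and are inverted
 * before use.
 */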
static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u8 s_en;
	u32 ss_en, ss_en_mask;
	u8 eu_en;
	int s;

	if (IS_ELKHARTLAKE(dev_priv)) {
		sseu->max_slices = 1;
		sseu->max_subslices = 4;
		sseu->max_eus_per_subslice = 8;
	} else {
		sseu->max_slices = 1;
		sseu->max_subslices = 8;
		sseu->max_eus_per_subslice = 8;
	}

	s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
	ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE);
	ss_en_mask = BIT(sseu->max_subslices) - 1;
	eu_en = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK);

	for (s = 0; s < sseu->max_slices; s++) {
		if (s_en & BIT(s)) {
			int ss_idx = sseu->max_subslices * s;
			int ss;

			sseu->slice_mask |= BIT(s);
			sseu->subslice_mask[s] = (ss_en >> ss_idx) & ss_en_mask;
			for (ss = 0; ss < sseu->max_subslices; ss++) {
				if (sseu->subslice_mask[s] & BIT(ss))
					sseu_set_eus(sseu, s, ss, eu_en);
			}
		}
	}
	sseu->eu_per_subslice = hweight8(eu_en);
	sseu->eu_total = compute_eu_total(sseu);

	/* ICL has no power gating restrictions. */
	sseu->has_slice_pg = 1;
	sseu->has_subslice_pg = 1;
	sseu->has_eu_pg = 1;
}

static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	const u32 fuse2 = I915_READ(GEN8_FUSE2);
	int s, ss;
	const int eu_mask = 0xff;
	u32 subslice_mask, eu_en;

	sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >>
			    GEN10_F2_S_ENA_SHIFT;
	sseu->max_slices = 6;
	sseu->max_subslices = 4;
	sseu->max_eus_per_subslice = 8;

	subslice_mask = (1 << 4) - 1;
	subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
			   GEN10_F2_SS_DIS_SHIFT);

	/*
	 * Slice0 can have up to 3 subslices, but there are only 2 in
	 * slice1/2.
	 */
	sseu->subslice_mask[0] = subslice_mask;
	for (s = 1; s < sseu->max_slices; s++)
		sseu->subslice_mask[s] = subslice_mask & 0x3;

	/* Slice0 */
	eu_en = ~I915_READ(GEN8_EU_DISABLE0);
	for (ss = 0; ss < sseu->max_subslices; ss++)
		sseu_set_eus(sseu, 0, ss, (eu_en >> (8 * ss)) & eu_mask);
	/* Slice1 */
	sseu_set_eus(sseu, 1, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN8_EU_DISABLE1);
	sseu_set_eus(sseu, 1, 1, eu_en & eu_mask);
	/* Slice2 */
	sseu_set_eus(sseu, 2, 0, (eu_en >> 8) & eu_mask);
	sseu_set_eus(sseu, 2, 1, (eu_en >> 16) & eu_mask);
	/* Slice3 */
	sseu_set_eus(sseu, 3, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN8_EU_DISABLE2);
	sseu_set_eus(sseu, 3, 1, eu_en & eu_mask);
	/* Slice4 */
	sseu_set_eus(sseu, 4, 0, (eu_en >> 8) & eu_mask);
	sseu_set_eus(sseu, 4, 1, (eu_en >> 16) & eu_mask);
	/* Slice5 */
	sseu_set_eus(sseu, 5, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN10_EU_DISABLE3);
	sseu_set_eus(sseu, 5, 1, eu_en & eu_mask);

	/* Do a second pass where we mark the subslices disabled if all their
	 * EUs are off.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			if (sseu_get_eus(sseu, s, ss) == 0)
				sseu->subslice_mask[s] &= ~BIT(ss);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * CNL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     intel_sseu_subslice_total(sseu)) :
				0;

	/* No restrictions on Power Gating */
	sseu->has_slice_pg = 1;
	sseu->has_subslice_pg = 1;
	sseu->has_eu_pg = 1;
}

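/*
 * CHV splits each subslice's EU disable fuses into two 4-bit row fields
 * (R0/R1) within CHV_FUSE_GT; the two halves are stitched back into a
 * single 8-bit mask below.
 */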
static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u32 fuse;

	fuse = I915_READ(CHV_FUSE_GT);

	sseu->slice_mask = BIT(0);
	sseu->max_slices = 1;
	sseu->max_subslices = 2;
	sseu->max_eus_per_subslice = 8;

	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
		u8 disabled_mask =
			((fuse & CHV_FGT_EU_DIS_SS0_R0_MASK) >>
			 CHV_FGT_EU_DIS_SS0_R0_SHIFT) |
			(((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
			  CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);

		sseu->subslice_mask[0] |= BIT(0);
		sseu_set_eus(sseu, 0, 0, ~disabled_mask);
	}

	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
		u8 disabled_mask =
			((fuse & CHV_FGT_EU_DIS_SS1_R0_MASK) >>
			 CHV_FGT_EU_DIS_SS1_R0_SHIFT) |
			(((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
			  CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);

		sseu->subslice_mask[0] |= BIT(1);
		sseu_set_eus(sseu, 0, 1, ~disabled_mask);
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * CHV is expected to always have a uniform distribution of EU
	 * across subslices.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				sseu->eu_total /
					intel_sseu_subslice_total(sseu) :
				0;
	/*
	 * CHV supports subslice power gating on devices with more than
	 * one subslice, and supports EU power gating on devices with
	 * more than one EU pair per subslice.
	 */
	sseu->has_slice_pg = 0;
	sseu->has_subslice_pg = intel_sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
}

static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	int s, ss;
	u32 fuse2, eu_disable, subslice_mask;
	const u8 eu_mask = 0xff;

	fuse2 = I915_READ(GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;

	/* BXT has a single slice and at most 3 subslices. */
	sseu->max_slices = IS_GEN9_LP(dev_priv) ? 1 : 3;
	sseu->max_subslices = IS_GEN9_LP(dev_priv) ? 3 : 4;
	sseu->max_eus_per_subslice = 8;

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	subslice_mask = (1 << sseu->max_subslices) - 1;
	subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
			   GEN9_F2_SS_DIS_SHIFT);

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		sseu->subslice_mask[s] = subslice_mask;

		eu_disable = I915_READ(GEN9_EU_DISABLE(s));
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			int eu_per_ss;
			u8 eu_disabled_mask;

			if (!(sseu->subslice_mask[s] & BIT(ss)))
				/* skip disabled subslice */
				continue;

			eu_disabled_mask = (eu_disable >> (ss * 8)) & eu_mask;

			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);

			eu_per_ss = sseu->max_eus_per_subslice -
				hweight8(eu_disabled_mask);

			/*
			 * Record which subslices have 7 EUs. We
			 * can tune the hash used to spread work among
			 * subslices if they are unbalanced.
			 */
			if (eu_per_ss == 7)
				sseu->subslice_7eu[s] |= BIT(ss);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * SKL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery. BXT is expected to be perfectly uniform in EU
	 * distribution.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     intel_sseu_subslice_total(sseu)) :
				0;
	/*
	 * SKL+ supports slice power gating on devices with more than
	 * one slice, and supports EU power gating on devices with
	 * more than one EU pair per subslice. BXT+ supports subslice
	 * power gating on devices with more than one subslice, and
	 * supports EU power gating on devices with more than one EU
	 * pair per subslice.
	 */
	sseu->has_slice_pg =
		!IS_GEN9_LP(dev_priv) && hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg =
		IS_GEN9_LP(dev_priv) && intel_sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = sseu->eu_per_subslice > 2;

	if (IS_GEN9_LP(dev_priv)) {
#define IS_SS_DISABLED(ss)	(!(sseu->subslice_mask[0] & BIT(ss)))
		info->has_pooled_eu = hweight8(sseu->subslice_mask[0]) == 3;

		sseu->min_eu_in_pool = 0;
		if (info->has_pooled_eu) {
			if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
				sseu->min_eu_in_pool = 3;
			else if (IS_SS_DISABLED(1))
				sseu->min_eu_in_pool = 6;
			else
				sseu->min_eu_in_pool = 9;
		}
#undef IS_SS_DISABLED
	}
}

static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	int s, ss;
	u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */

	fuse2 = I915_READ(GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
	sseu->max_slices = 3;
	sseu->max_subslices = 3;
	sseu->max_eus_per_subslice = 8;

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	subslice_mask = GENMASK(sseu->max_subslices - 1, 0);
	subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
			   GEN8_F2_SS_DIS_SHIFT);

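	/*
	 * Each slice has 24 EU disable bits (3 subslices x 8 EUs), so the
	 * fields for slice1 and slice2 straddle 32-bit register boundaries
	 * and must be stitched together from two adjacent GEN8_EU_DISABLE
	 * registers.
	 */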
	eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
	eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
			 (32 - GEN8_EU_DIS0_S1_SHIFT));
	eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
			 (32 - GEN8_EU_DIS1_S2_SHIFT));

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		sseu->subslice_mask[s] = subslice_mask;

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			u8 eu_disabled_mask;
			u32 n_disabled;

			if (!(sseu->subslice_mask[s] & BIT(ss)))
				/* skip disabled subslice */
				continue;

			eu_disabled_mask =
				eu_disable[s] >> (ss * sseu->max_eus_per_subslice);

			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);

			n_disabled = hweight8(eu_disabled_mask);

			/*
			 * Record which subslices have 7 EUs.
			 */
			if (sseu->max_eus_per_subslice - n_disabled == 7)
				sseu->subslice_7eu[s] |= 1 << ss;
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * BDW is expected to always have a uniform distribution of EU across
	 * subslices with the exception that any one EU in any one subslice may
	 * be fused off for die recovery.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     intel_sseu_subslice_total(sseu)) :
				0;

	/*
	 * BDW supports slice power gating on devices with more than
	 * one slice.
	 */
	sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg = 0;
	sseu->has_eu_pg = 0;
}

static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u32 fuse1;
	int s, ss;

	/*
	 * There isn't a register to tell us how many slices/subslices. We
	 * work off the PCI-ids here.
	 */
	switch (INTEL_INFO(dev_priv)->gt) {
	default:
		MISSING_CASE(INTEL_INFO(dev_priv)->gt);
		/* fall through */
	case 1:
		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] = BIT(0);
		break;
	case 2:
		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] = BIT(0) | BIT(1);
		break;
	case 3:
		sseu->slice_mask = BIT(0) | BIT(1);
		sseu->subslice_mask[0] = BIT(0) | BIT(1);
		sseu->subslice_mask[1] = BIT(0) | BIT(1);
		break;
	}

	sseu->max_slices = hweight8(sseu->slice_mask);
	sseu->max_subslices = hweight8(sseu->subslice_mask[0]);

	fuse1 = I915_READ(HSW_PAVP_FUSE1);
	switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) {
	default:
		MISSING_CASE((fuse1 & HSW_F1_EU_DIS_MASK) >>
			     HSW_F1_EU_DIS_SHIFT);
		/* fall through */
	case HSW_F1_EU_DIS_10EUS:
		sseu->eu_per_subslice = 10;
		break;
	case HSW_F1_EU_DIS_8EUS:
		sseu->eu_per_subslice = 8;
		break;
	case HSW_F1_EU_DIS_6EUS:
		sseu->eu_per_subslice = 6;
		break;
	}
	sseu->max_eus_per_subslice = sseu->eu_per_subslice;

	for (s = 0; s < sseu->max_slices; s++) {
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			sseu_set_eus(sseu, s, ss,
				     (1UL << sseu->eu_per_subslice) - 1);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/* No powergating for you. */
	sseu->has_slice_pg = 0;
	sseu->has_subslice_pg = 0;
	sseu->has_eu_pg = 0;
}

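/*
 * TIMESTAMP_OVERRIDE encodes the reference frequency as an integer
 * divider plus a fractional denominator; the value returned here is
 * (divider + 1) * 1000 + 1000 / (denominator + 1), in kHz.
 */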
static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
{
	u32 ts_override = I915_READ(GEN9_TIMESTAMP_OVERRIDE);
	u32 base_freq, frac_freq;

	base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
	base_freq *= 1000;

	frac_freq = ((ts_override &
		      GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
	frac_freq = 1000 / (frac_freq + 1);

	return base_freq + frac_freq;
}

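/*
 * Decode the crystal clock frequency field of RPM_CONFIG0 into kHz; gen10
 * knows two candidate crystal frequencies, gen11 four.
 */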
static u32 gen10_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
					u32 rpm_config_reg)
{
	u32 f19_2_mhz = 19200;
	u32 f24_mhz = 24000;
	u32 crystal_clock = (rpm_config_reg &
			     GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
			    GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;

	switch (crystal_clock) {
	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
		return f19_2_mhz;
	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
		return f24_mhz;
	default:
		MISSING_CASE(crystal_clock);
		return 0;
	}
}

static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
					u32 rpm_config_reg)
{
	u32 f19_2_mhz = 19200;
	u32 f24_mhz = 24000;
	u32 f25_mhz = 25000;
	u32 f38_4_mhz = 38400;
	u32 crystal_clock = (rpm_config_reg &
			     GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
			    GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;

	switch (crystal_clock) {
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
		return f24_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
		return f19_2_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
		return f38_4_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
		return f25_mhz;
	default:
		MISSING_CASE(crystal_clock);
		return 0;
	}
}

static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
{
	u32 f12_5_mhz = 12500;
	u32 f19_2_mhz = 19200;
	u32 f24_mhz = 24000;

	if (INTEL_GEN(dev_priv) <= 4) {
		/* PRMs say:
		 *
		 *     "The value in this register increments once every 16
		 *      hclks." (through the “Clocking Configuration”
		 *      (“CLKCFG”) MCHBAR register)
		 */
		return dev_priv->rawclk_freq / 16;
	} else if (INTEL_GEN(dev_priv) <= 8) {
		/* PRMs say:
		 *
		 *     "The PCU TSC counts 10ns increments; this timestamp
		 *      reflects bits 38:3 of the TSC (i.e. 80ns granularity,
		 *      rolling over every 1.5 hours)."
		 */
		return f12_5_mhz;
	} else if (INTEL_GEN(dev_priv) <= 9) {
		u32 ctc_reg = I915_READ(CTC_MODE);
		u32 freq = 0;

		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
			freq = read_reference_ts_freq(dev_priv);
		} else {
			freq = IS_GEN9_LP(dev_priv) ? f19_2_mhz : f24_mhz;

			/* Now figure out how the command stream's timestamp
			 * register increments from this frequency (it might
			 * increment only every few clock cycles).
			 */
			freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
				      CTC_SHIFT_PARAMETER_SHIFT);
		}

		return freq;
	} else if (INTEL_GEN(dev_priv) <= 11) {
		u32 ctc_reg = I915_READ(CTC_MODE);
		u32 freq = 0;

		/* First figure out the reference frequency. There are 2 ways
		 * we can compute the frequency, either through the
		 * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
		 * tells us which one we should use.
		 */
		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
			freq = read_reference_ts_freq(dev_priv);
		} else {
			u32 rpm_config_reg = I915_READ(RPM_CONFIG0);

			if (INTEL_GEN(dev_priv) <= 10)
				freq = gen10_get_crystal_clock_freq(dev_priv,
								rpm_config_reg);
			else
				freq = gen11_get_crystal_clock_freq(dev_priv,
								rpm_config_reg);

			/* Now figure out how the command stream's timestamp
			 * register increments from this frequency (it might
			 * increment only every few clock cycles).
			 */
			freq >>= 3 - ((rpm_config_reg &
				       GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
				      GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
		}

		return freq;
	}

	MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n");
	return 0;
}

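/*
 * The PCI ID list macros below expand to INTEL_VGA_DEVICE(id, info)
 * entries; redefining INTEL_VGA_DEVICE to (id) turns each list into a
 * plain array of device IDs that can be searched at runtime.
 */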
#undef INTEL_VGA_DEVICE
#define INTEL_VGA_DEVICE(id, info) (id)

static const u16 subplatform_ult_ids[] = {
	INTEL_HSW_ULT_GT1_IDS(0),
	INTEL_HSW_ULT_GT2_IDS(0),
	INTEL_HSW_ULT_GT3_IDS(0),
	INTEL_BDW_ULT_GT1_IDS(0),
	INTEL_BDW_ULT_GT2_IDS(0),
	INTEL_BDW_ULT_GT3_IDS(0),
	INTEL_BDW_ULT_RSVD_IDS(0),
	INTEL_SKL_ULT_GT1_IDS(0),
	INTEL_SKL_ULT_GT2_IDS(0),
	INTEL_SKL_ULT_GT3_IDS(0),
	INTEL_KBL_ULT_GT1_IDS(0),
	INTEL_KBL_ULT_GT2_IDS(0),
	INTEL_KBL_ULT_GT3_IDS(0),
	INTEL_CFL_U_GT2_IDS(0),
	INTEL_CFL_U_GT3_IDS(0),
	INTEL_WHL_U_GT1_IDS(0),
	INTEL_WHL_U_GT2_IDS(0),
	INTEL_WHL_U_GT3_IDS(0)
};

static const u16 subplatform_ulx_ids[] = {
	INTEL_HSW_ULX_GT1_IDS(0),
	INTEL_HSW_ULX_GT2_IDS(0),
	INTEL_BDW_ULX_GT1_IDS(0),
	INTEL_BDW_ULX_GT2_IDS(0),
	INTEL_BDW_ULX_GT3_IDS(0),
	INTEL_BDW_ULX_RSVD_IDS(0),
	INTEL_SKL_ULX_GT1_IDS(0),
	INTEL_SKL_ULX_GT2_IDS(0),
	INTEL_KBL_ULX_GT1_IDS(0),
	INTEL_KBL_ULX_GT2_IDS(0)
};

static const u16 subplatform_aml_ids[] = {
	INTEL_AML_KBL_GT2_IDS(0),
	INTEL_AML_CFL_GT2_IDS(0)
};

static const u16 subplatform_portf_ids[] = {
	INTEL_CNL_PORT_F_IDS(0),
	INTEL_ICL_PORT_F_IDS(0)
};

static bool find_devid(u16 id, const u16 *p, unsigned int num)
{
	for (; num; num--, p++) {
		if (*p == id)
			return true;
	}

	return false;
}

void intel_device_info_subplatform_init(struct drm_i915_private *i915)
{
	const struct intel_device_info *info = INTEL_INFO(i915);
	const struct intel_runtime_info *rinfo = RUNTIME_INFO(i915);
	const unsigned int pi = __platform_mask_index(rinfo, info->platform);
	const unsigned int pb = __platform_mask_bit(rinfo, info->platform);
	u16 devid = INTEL_DEVID(i915);
	u32 mask = 0;

	/* Make sure IS_<platform> checks are working. */
	RUNTIME_INFO(i915)->platform_mask[pi] = BIT(pb);

	/* Find and mark subplatform bits based on the PCI device id. */
	if (find_devid(devid, subplatform_ult_ids,
		       ARRAY_SIZE(subplatform_ult_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_ULT);
	} else if (find_devid(devid, subplatform_ulx_ids,
			      ARRAY_SIZE(subplatform_ulx_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_ULX);
		if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
			/* ULX machines are also considered ULT. */
			mask |= BIT(INTEL_SUBPLATFORM_ULT);
		}
	} else if (find_devid(devid, subplatform_aml_ids,
			      ARRAY_SIZE(subplatform_aml_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_AML);
	} else if (find_devid(devid, subplatform_portf_ids,
			      ARRAY_SIZE(subplatform_portf_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_PORTF);
	}

	GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_BITS);

	RUNTIME_INFO(i915)->platform_mask[pi] |= mask;
}

/**
 * intel_device_info_runtime_init - initialize runtime info
 * @dev_priv: the i915 device
 *
 * Determine various intel_device_info fields at runtime.
 *
 * Use it when either:
 *   - it's judged too laborious to fill n static structures with the limit
 *     when a simple if statement does the job,
 *   - run-time checks (eg read fuse/strap registers) are needed.
 *
 * This function needs to be called:
 *   - after the MMIO has been setup as we are reading registers,
 *   - after the PCH has been detected,
 *   - before the first usage of the fields it can tweak.
 */
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv);
	enum pipe pipe;

	if (INTEL_GEN(dev_priv) >= 10) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_scalers[pipe] = 2;
	} else if (IS_GEN(dev_priv, 9)) {
		runtime->num_scalers[PIPE_A] = 2;
		runtime->num_scalers[PIPE_B] = 2;
		runtime->num_scalers[PIPE_C] = 1;
	}

	BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES);

	if (INTEL_GEN(dev_priv) >= 11)
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 6;
	else if (IS_GEN(dev_priv, 10) || IS_GEMINILAKE(dev_priv))
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 3;
	else if (IS_BROXTON(dev_priv)) {
		/*
		 * Skylake and Broxton currently don't expose the topmost plane as its
		 * use is exclusive with the legacy cursor and we only want to expose
		 * one of those, not both. Until we can safely expose the topmost plane
		 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
		 * we don't expose the topmost plane at all to prevent ABI breakage
		 * down the line.
		 */

		runtime->num_sprites[PIPE_A] = 2;
		runtime->num_sprites[PIPE_B] = 2;
		runtime->num_sprites[PIPE_C] = 1;
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 2;
	} else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 1;
	}

	if (i915_modparams.disable_display) {
		DRM_INFO("Display disabled (module parameter)\n");
		info->num_pipes = 0;
	} else if (HAS_DISPLAY(dev_priv) &&
		   (IS_GEN_RANGE(dev_priv, 7, 8)) &&
		   HAS_PCH_SPLIT(dev_priv)) {
		u32 fuse_strap = I915_READ(FUSE_STRAP);
		u32 sfuse_strap = I915_READ(SFUSE_STRAP);

		/*
		 * SFUSE_STRAP is supposed to have a bit signalling the display
		 * is fused off. Unfortunately it seems that, at least in
		 * certain cases, fused off display means that PCH display
		 * reads don't land anywhere. In that case, we read 0s.
		 *
		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
		 * should be set when taking over after the firmware.
		 */
		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
		    (HAS_PCH_CPT(dev_priv) &&
		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
			DRM_INFO("Display fused off, disabling\n");
			info->num_pipes = 0;
		} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
			DRM_INFO("PipeC fused off\n");
			info->num_pipes -= 1;
		}
	} else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
		u32 dfsm = I915_READ(SKL_DFSM);
		u8 disabled_mask = 0;
		bool invalid;
		int num_bits;

		if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
			disabled_mask |= BIT(PIPE_A);
		if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
			disabled_mask |= BIT(PIPE_B);
		if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
			disabled_mask |= BIT(PIPE_C);

		num_bits = hweight8(disabled_mask);

		switch (disabled_mask) {
		case BIT(PIPE_A):
		case BIT(PIPE_B):
		case BIT(PIPE_A) | BIT(PIPE_B):
		case BIT(PIPE_A) | BIT(PIPE_C):
			invalid = true;
			break;
		default:
			invalid = false;
		}

		if (num_bits > info->num_pipes || invalid)
			DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
				  disabled_mask);
		else
			info->num_pipes -= num_bits;
	}

	/* Initialize slice/subslice/EU info */
	if (IS_HASWELL(dev_priv))
		haswell_sseu_info_init(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		cherryview_sseu_info_init(dev_priv);
	else if (IS_BROADWELL(dev_priv))
		broadwell_sseu_info_init(dev_priv);
	else if (IS_GEN(dev_priv, 9))
		gen9_sseu_info_init(dev_priv);
	else if (IS_GEN(dev_priv, 10))
		gen10_sseu_info_init(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 11)
		gen11_sseu_info_init(dev_priv);

	if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
		DRM_INFO("Disabling ppGTT for VT-d support\n");
		info->ppgtt_type = INTEL_PPGTT_NONE;
	}

	/* Initialize command stream timestamp frequency */
	runtime->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
}

void intel_driver_caps_print(const struct intel_driver_caps *caps,
			     struct drm_printer *p)
{
	drm_printf(p, "Has logical contexts? %s\n",
		   yesno(caps->has_logical_contexts));
	drm_printf(p, "scheduler: %x\n", caps->scheduler);
}

/*
 * Determine which engines are fused off in our particular hardware. Since the
 * fuse register is in the blitter powerwell, we need forcewake to be ready at
 * this point (but later we need to prune the forcewake domains for engines that
 * are indeed fused off).
 */
void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	unsigned int logical_vdbox = 0;
	unsigned int i;
	u32 media_fuse;
	u16 vdbox_mask;
	u16 vebox_mask;

	if (INTEL_GEN(dev_priv) < 11)
		return;

	media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);

	vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
	vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
		      GEN11_GT_VEBOX_DISABLE_SHIFT;

	for (i = 0; i < I915_MAX_VCS; i++) {
		if (!HAS_ENGINE(dev_priv, _VCS(i)))
			continue;

		if (!(BIT(i) & vdbox_mask)) {
			info->engine_mask &= ~BIT(_VCS(i));
			DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
			continue;
		}

		/*
		 * In Gen11, only even numbered logical VDBOXes are
		 * hooked up to an SFC (Scaler & Format Converter) unit.
		 */
		if (logical_vdbox++ % 2 == 0)
			RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
	}
	DRM_DEBUG_DRIVER("vdbox enable: %04x, instances: %04lx\n",
			 vdbox_mask, VDBOX_MASK(dev_priv));
	GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv));

	for (i = 0; i < I915_MAX_VECS; i++) {
		if (!HAS_ENGINE(dev_priv, _VECS(i)))
			continue;

		if (!(BIT(i) & vebox_mask)) {
			info->engine_mask &= ~BIT(_VECS(i));
			DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
		}
	}
	DRM_DEBUG_DRIVER("vebox enable: %04x, instances: %04lx\n",
			 vebox_mask, VEBOX_MASK(dev_priv));
	GEM_BUG_ON(vebox_mask != VEBOX_MASK(dev_priv));
}