/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_drv.h"

#define PLATFORM_NAME(x) [INTEL_##x] = #x
static const char * const platform_names[] = {
	PLATFORM_NAME(I830),
	PLATFORM_NAME(I845G),
	PLATFORM_NAME(I85X),
	PLATFORM_NAME(I865G),
	PLATFORM_NAME(I915G),
	PLATFORM_NAME(I915GM),
	PLATFORM_NAME(I945G),
	PLATFORM_NAME(I945GM),
	PLATFORM_NAME(G33),
	PLATFORM_NAME(PINEVIEW),
	PLATFORM_NAME(I965G),
	PLATFORM_NAME(I965GM),
	PLATFORM_NAME(G45),
	PLATFORM_NAME(GM45),
	PLATFORM_NAME(IRONLAKE),
	PLATFORM_NAME(SANDYBRIDGE),
	PLATFORM_NAME(IVYBRIDGE),
	PLATFORM_NAME(VALLEYVIEW),
	PLATFORM_NAME(HASWELL),
	PLATFORM_NAME(BROADWELL),
	PLATFORM_NAME(CHERRYVIEW),
	PLATFORM_NAME(SKYLAKE),
	PLATFORM_NAME(BROXTON),
	PLATFORM_NAME(KABYLAKE),
	PLATFORM_NAME(GEMINILAKE),
};
#undef PLATFORM_NAME

const char *intel_platform_name(enum intel_platform platform)
{
	if (WARN_ON_ONCE(platform >= ARRAY_SIZE(platform_names) ||
			 platform_names[platform] == NULL))
		return "<unknown>";

	return platform_names[platform];
}

void intel_device_info_dump(struct drm_i915_private *dev_priv)
{
	const struct intel_device_info *info = &dev_priv->info;

	DRM_DEBUG_DRIVER("i915 device info: platform=%s gen=%i pciid=0x%04x rev=0x%02x",
			 intel_platform_name(info->platform),
			 info->gen,
			 dev_priv->drm.pdev->device,
			 dev_priv->drm.pdev->revision);
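	/*
	 * Dump each boolean device info flag, as enumerated by the
	 * DEV_INFO_FOR_EACH_FLAG() x-macro.
	 */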
#define PRINT_FLAG(name) \
	DRM_DEBUG_DRIVER("i915 device info: " #name ": %s", yesno(info->name))
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
}

static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
	u32 fuse, eu_dis;

	fuse = I915_READ(CHV_FUSE_GT);
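	/*
	 * CHV has a single slice with up to two subslices of 8 EUs each;
	 * the GT fuse reports which subslices and EUs have been disabled.
	 */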

	sseu->slice_mask = BIT(0);

	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
		sseu->subslice_mask |= BIT(0);
		eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
				 CHV_FGT_EU_DIS_SS0_R1_MASK);
		sseu->eu_total += 8 - hweight32(eu_dis);
	}

	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
		sseu->subslice_mask |= BIT(1);
		eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
				 CHV_FGT_EU_DIS_SS1_R1_MASK);
		sseu->eu_total += 8 - hweight32(eu_dis);
	}

	/*
	 * CHV is expected to always have a uniform distribution of EUs
	 * across subslices.
	 */
	sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
				sseu->eu_total / sseu_subslice_total(sseu) :
				0;
	/*
	 * CHV supports subslice power gating on devices with more than
	 * one subslice, and supports EU power gating on devices with
	 * more than one EU pair per subslice.
	 */
	sseu->has_slice_pg = 0;
	sseu->has_subslice_pg = sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
}

static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	struct sseu_dev_info *sseu = &info->sseu;
	int s_max = 3, ss_max = 4, eu_max = 8;
	int s, ss;
	u32 fuse2, eu_disable;
	u8 eu_mask = 0xff;

	fuse2 = I915_READ(GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	sseu->subslice_mask = (1 << ss_max) - 1;
	sseu->subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
				 GEN9_F2_SS_DIS_SHIFT);

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < s_max; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		eu_disable = I915_READ(GEN9_EU_DISABLE(s));
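		/*
		 * Each slice has its own EU disable register, with one
		 * disable bit per EU, grouped one byte per subslice.
		 */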
		for (ss = 0; ss < ss_max; ss++) {
			int eu_per_ss;

			if (!(sseu->subslice_mask & BIT(ss)))
				/* skip disabled subslice */
				continue;

			eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
						      eu_mask);

			/*
			 * Record which subslice(s) have 7 EUs. We can tune
			 * the hash used to spread work among subslices if
			 * they are unbalanced.
			 */
			if (eu_per_ss == 7)
				sseu->subslice_7eu[s] |= BIT(ss);

			sseu->eu_total += eu_per_ss;
		}
	}

	/*
	 * SKL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery. BXT is expected to be perfectly uniform in EU
	 * distribution.
	 */
	sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     sseu_subslice_total(sseu)) : 0;
	/*
	 * SKL supports slice power gating on devices with more than
	 * one slice, and supports EU power gating on devices with
	 * more than one EU pair per subslice. BXT supports subslice
	 * power gating on devices with more than one subslice, and
	 * supports EU power gating on devices with more than one EU
	 * pair per subslice.
	 */
	sseu->has_slice_pg =
		(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
		hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg =
		IS_GEN9_LP(dev_priv) && sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = sseu->eu_per_subslice > 2;

	if (IS_BROXTON(dev_priv)) {
#define IS_SS_DISABLED(ss)	(!(sseu->subslice_mask & BIT(ss)))
		/*
		 * There is a HW issue in 2x6 fused down parts that requires
		 * Pooled EU to be enabled as a WA. The pool configuration
		 * changes depending upon which subslice is fused down. This
		 * doesn't matter if the device has all 3 subslices enabled.
		 */
		/* WaEnablePooledEuFor2x6:bxt */
		info->has_pooled_eu = ((hweight8(sseu->subslice_mask) == 3) ||
				       (hweight8(sseu->subslice_mask) == 2 &&
					INTEL_REVID(dev_priv) < BXT_REVID_C0));

		sseu->min_eu_in_pool = 0;
		if (info->has_pooled_eu) {
			if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
				sseu->min_eu_in_pool = 3;
			else if (IS_SS_DISABLED(1))
				sseu->min_eu_in_pool = 6;
			else
				sseu->min_eu_in_pool = 9;
		}
#undef IS_SS_DISABLED
	}
}

static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
	const int s_max = 3, ss_max = 3, eu_max = 8;
	int s, ss;
	u32 fuse2, eu_disable[3]; /* s_max */

	fuse2 = I915_READ(GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	sseu->subslice_mask = GENMASK(ss_max - 1, 0);
	sseu->subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
				 GEN8_F2_SS_DIS_SHIFT);

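	/*
	 * The 24-bit per-slice EU disable masks straddle the 32-bit
	 * GEN8_EU_DISABLE registers, so the masks for slices 1 and 2 are
	 * stitched together from the low bits of one register and the
	 * high bits of the previous one.
	 */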
	eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
	eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
			 (32 - GEN8_EU_DIS0_S1_SHIFT));
	eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
			 (32 - GEN8_EU_DIS1_S2_SHIFT));

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < s_max; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		for (ss = 0; ss < ss_max; ss++) {
			u32 n_disabled;

			if (!(sseu->subslice_mask & BIT(ss)))
				/* skip disabled subslice */
				continue;

			n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));

			/*
			 * Record which subslices have 7 EUs.
			 */
			if (eu_max - n_disabled == 7)
				sseu->subslice_7eu[s] |= 1 << ss;

			sseu->eu_total += eu_max - n_disabled;
		}
	}

	/*
	 * BDW is expected to always have a uniform distribution of EU across
	 * subslices with the exception that any one EU in any one subslice may
	 * be fused off for die recovery.
	 */
	sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     sseu_subslice_total(sseu)) : 0;

	/*
	 * BDW supports slice power gating on devices with more than
	 * one slice.
	 */
	sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg = 0;
	sseu->has_eu_pg = 0;
}

/*
 * Determine various intel_device_info fields at runtime.
 *
 * Use it when either:
 *   - it's judged too laborious to encode the limit in n static structures
 *     when a simple if statement does the job,
 *   - run-time checks (e.g. reading fuse/strap registers) are needed.
 *
 * This function needs to be called:
 *   - after the MMIO has been setup as we are reading registers,
 *   - after the PCH has been detected,
 *   - before the first usage of the fields it can tweak.
 */
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	enum pipe pipe;

	if (INTEL_GEN(dev_priv) >= 9) {
		info->num_scalers[PIPE_A] = 2;
		info->num_scalers[PIPE_B] = 2;
		info->num_scalers[PIPE_C] = 1;
	}

	/*
	 * Skylake and Broxton currently don't expose the topmost plane as its
	 * use is exclusive with the legacy cursor and we only want to expose
	 * one of those, not both. Until we can safely expose the topmost plane
	 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
	 * we don't expose the topmost plane at all to prevent ABI breakage
	 * down the line.
	 */
	if (IS_GEMINILAKE(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 3;
	} else if (IS_BROXTON(dev_priv)) {
		info->num_sprites[PIPE_A] = 2;
		info->num_sprites[PIPE_B] = 2;
		info->num_sprites[PIPE_C] = 1;
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 2;
	} else if (INTEL_GEN(dev_priv) >= 5) {
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 1;
	}

	if (i915.disable_display) {
		DRM_INFO("Display disabled (module parameter)\n");
		info->num_pipes = 0;
	} else if (info->num_pipes > 0 &&
		   (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
		   HAS_PCH_SPLIT(dev_priv)) {
		u32 fuse_strap = I915_READ(FUSE_STRAP);
		u32 sfuse_strap = I915_READ(SFUSE_STRAP);

		/*
		 * SFUSE_STRAP is supposed to have a bit signalling the display
		 * is fused off. Unfortunately it seems that, at least in
		 * certain cases, fused off display means that PCH display
		 * reads don't land anywhere. In that case, we read 0s.
		 *
		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
		 * should be set when taking over after the firmware.
		 */
		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
		    (dev_priv->pch_type == PCH_CPT &&
		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
			DRM_INFO("Display fused off, disabling\n");
			info->num_pipes = 0;
		} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
			DRM_INFO("PipeC fused off\n");
			info->num_pipes -= 1;
		}
	} else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) {
		u32 dfsm = I915_READ(SKL_DFSM);
		u8 disabled_mask = 0;
		bool invalid;
		int num_bits;

		if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
			disabled_mask |= BIT(PIPE_A);
		if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
			disabled_mask |= BIT(PIPE_B);
		if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
			disabled_mask |= BIT(PIPE_C);

		num_bits = hweight8(disabled_mask);
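		/*
		 * Pipes can only be fused off from the last one (C)
		 * downwards, with no holes, so any other disabled
		 * combination indicates a bogus fuse read.
		 */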

		switch (disabled_mask) {
		case BIT(PIPE_A):
		case BIT(PIPE_B):
		case BIT(PIPE_A) | BIT(PIPE_B):
		case BIT(PIPE_A) | BIT(PIPE_C):
			invalid = true;
			break;
		default:
			invalid = false;
		}

		if (num_bits > info->num_pipes || invalid)
			DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
				  disabled_mask);
		else
			info->num_pipes -= num_bits;
	}

	/* Initialize slice/subslice/EU info */
	if (IS_CHERRYVIEW(dev_priv))
		cherryview_sseu_info_init(dev_priv);
	else if (IS_BROADWELL(dev_priv))
		broadwell_sseu_info_init(dev_priv);
	else if (INTEL_INFO(dev_priv)->gen >= 9)
		gen9_sseu_info_init(dev_priv);

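	/* Platforms without an LLC rely on snooping for coherency. */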
	info->has_snoop = !info->has_llc;

	DRM_DEBUG_DRIVER("slice mask: %04x\n", info->sseu.slice_mask);
	DRM_DEBUG_DRIVER("slice total: %u\n", hweight8(info->sseu.slice_mask));
	DRM_DEBUG_DRIVER("subslice total: %u\n",
			 sseu_subslice_total(&info->sseu));
	DRM_DEBUG_DRIVER("subslice mask: %04x\n", info->sseu.subslice_mask);
	DRM_DEBUG_DRIVER("subslice per slice: %u\n",
			 hweight8(info->sseu.subslice_mask));
	DRM_DEBUG_DRIVER("EU total: %u\n", info->sseu.eu_total);
	DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->sseu.eu_per_subslice);
	DRM_DEBUG_DRIVER("has slice power gating: %s\n",
			 info->sseu.has_slice_pg ? "y" : "n");
	DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
			 info->sseu.has_subslice_pg ? "y" : "n");
	DRM_DEBUG_DRIVER("has EU power gating: %s\n",
			 info->sseu.has_eu_pg ? "y" : "n");
}