/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2018 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_context.h"
#include "intel_gt.h"
#include "intel_workarounds.h"

/**
 * DOC: Hardware workarounds
 *
 * This file is intended as a central place to implement most [1]_ of the
 * required workarounds for hardware to work as originally intended. They fall
 * in five basic categories depending on how/when they are applied:
 *
 * - Workarounds that touch registers that are saved/restored to/from the HW
 *   context image. The list is emitted (via Load Register Immediate commands)
 *   every time a new context is created.
 * - GT workarounds. The list of these WAs is applied whenever these registers
 *   revert to default values (on GPU reset, suspend/resume [2]_, etc.).
 * - Display workarounds. The list is applied during display clock-gating
 *   initialization.
 * - Workarounds that whitelist a privileged register, so that UMDs can manage
 *   them directly. This is just a special case of an MMIO workaround (as we
 *   write the list of these to-be-whitelisted registers to some special HW
 *   registers).
 * - Workaround batchbuffers, that get executed automatically by the hardware
 *   on every HW context restore.
 *
 * .. [1] Please notice that there are other WAs that, due to their nature,
 *    cannot be applied from a central place. Those are peppered around the rest
 *    of the code, as needed.
 *
 * .. [2] Technically, some registers are powercontext saved & restored, so they
 *    survive a suspend/resume. In practice, writing them again is not too
 *    costly and simplifies things. We can revisit this in the future.
 *
 * Layout
 * ~~~~~~
 *
 * Keep things in this file ordered by WA type, as per the above (context, GT,
 * display, register whitelist, batchbuffer). Then, inside each type, keep the
 * following order:
 *
 * - Infrastructure functions and macros
 * - WAs per platform in standard gen/chrono order
 * - Public functions to init or apply the given workaround type.
 */

static void wa_init_start(struct i915_wa_list *wal, const char *name, const char *engine_name)
{
	wal->name = name;
	wal->engine_name = engine_name;
}

#define WA_LIST_CHUNK (1 << 4)

static void wa_init_finish(struct i915_wa_list *wal)
{
	/* Trim unused entries. */
	if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
		struct i915_wa *list = kmemdup(wal->list,
					       wal->count * sizeof(*list),
					       GFP_KERNEL);

		if (list) {
			kfree(wal->list);
			wal->list = list;
		}
	}

	if (!wal->count)
		return;

	DRM_DEBUG_DRIVER("Initialized %u %s workarounds on %s\n",
			 wal->wa_count, wal->name, wal->engine_name);
}
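
/*
 * _wa_add() below grows the list in WA_LIST_CHUNK increments and keeps it
 * sorted by mmio offset (binary search plus the insertion loop at the end),
 * so a later write to an already-listed register is merged into the
 * existing entry instead of being emitted twice.
 */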

static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
{
	unsigned int addr = i915_mmio_reg_offset(wa->reg);
	unsigned int start = 0, end = wal->count;
	const unsigned int grow = WA_LIST_CHUNK;
	struct i915_wa *wa_;

	GEM_BUG_ON(!is_power_of_2(grow));

	if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
		struct i915_wa *list;

		list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
				     GFP_KERNEL);
		if (!list) {
			DRM_ERROR("No space for workaround init!\n");
			return;
		}

		if (wal->list)
			memcpy(list, wal->list, sizeof(*wa) * wal->count);

		wal->list = list;
	}

	while (start < end) {
		unsigned int mid = start + (end - start) / 2;

		if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) {
			start = mid + 1;
		} else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) {
			end = mid;
		} else {
			wa_ = &wal->list[mid];

			if ((wa->mask & ~wa_->mask) == 0) {
				DRM_ERROR("Discarding overwritten w/a for reg %04x (mask: %08x, value: %08x)\n",
					  i915_mmio_reg_offset(wa_->reg),
					  wa_->mask, wa_->val);

				wa_->val &= ~wa->mask;
			}

			wal->wa_count++;
			wa_->val |= wa->val;
			wa_->mask |= wa->mask;
			wa_->read |= wa->read;
			return;
		}
	}

	wal->wa_count++;
	wa_ = &wal->list[wal->count++];
	*wa_ = *wa;

	while (wa_-- > wal->list) {
		GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) ==
			   i915_mmio_reg_offset(wa_[1].reg));
		if (i915_mmio_reg_offset(wa_[1].reg) >
		    i915_mmio_reg_offset(wa_[0].reg))
			break;

		swap(wa_[1], wa_[0]);
	}
}

static void
wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
		   u32 val)
{
	struct i915_wa wa = {
		.reg  = reg,
		.mask = mask,
		.val  = val,
		.read = mask,
	};

	_wa_add(wal, &wa);
}

static void
wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
	wa_write_masked_or(wal, reg, val, _MASKED_BIT_ENABLE(val));
}

static void
wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
	wa_write_masked_or(wal, reg, ~0, val);
}

static void
wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
	wa_write_masked_or(wal, reg, val, val);
}

#define WA_SET_BIT_MASKED(addr, mask) \
	wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_ENABLE(mask))

#define WA_CLR_BIT_MASKED(addr, mask) \
	wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_DISABLE(mask))

#define WA_SET_FIELD_MASKED(addr, mask, value) \
	wa_write_masked_or(wal, (addr), (mask), _MASKED_FIELD((mask), (value)))
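
/*
 * Masked registers, used by the WA_*_MASKED() helpers above, keep a
 * write-enable mask in their upper 16 bits: only bits whose mask bit is
 * set are updated by a write. As a rough sketch of the expansion (the
 * authoritative _MASKED_* definitions live in i915_reg.h):
 *
 *	WA_SET_BIT_MASKED(reg, bit)	-> writes (bit << 16) | bit
 *	WA_CLR_BIT_MASKED(reg, bit)	-> writes (bit << 16)
 *	WA_SET_FIELD_MASKED(reg, m, v)	-> writes (m << 16) | v
 *
 * so every other bit of the register is left untouched by the hardware.
 */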

static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisableAsyncFlipPerfMode:bdw,chv */
	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);

	/* WaDisablePartialInstShootdown:bdw,chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:bdw,chv */
	/* WaHdcDisableFetchWhenMasked:bdw,chv */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_DONOT_FETCH_MEM_WHEN_MASKED |
			  HDC_FORCE_NON_COHERENT);

	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
	 *  polygons in the same 8x4 pixel/sample area to be processed without
	 *  stalling waiting for the earlier ones to write to Hierarchical Z
	 *  buffer."
	 *
	 * This optimization is off by default for BDW and CHV; turn it on.
	 */
	WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw,chv */
	WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN6_WIZ_HASHING_MASK,
			    GEN6_WIZ_HASHING_16x4);
}

static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	gen8_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* WaDisableDopClockGating:bdw
	 *
	 * Also see the related UCGTCL1 write in broadwell_init_clock_gating()
	 * to disable EUTC clock gating.
	 */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
			  DOP_CLOCK_GATING_DISABLE);

	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN8_SAMPLER_POWER_BYPASS_DIS);

	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  /* WaForceContextSaveRestoreNonCoherent:bdw */
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
			  (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
}

static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen8_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* Improve HiZ throughput on CHV. */
	WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
}

static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	if (HAS_LLC(i915)) {
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
		 *
		 * Must match Display Engine. See
		 * WaCompressedResourceDisplayNewHashMode.
		 */
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN9_PBE_COMPRESSED_HASH_SELECTION);
		WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
				  GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
	}

	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  FLOW_CONTROL_ENABLE |
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
			  GEN9_ENABLE_YV12_BUGFIX |
			  GEN9_ENABLE_GPGPU_PREEMPTION);

	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(CACHE_MODE_1,
			  GEN8_4x4_STC_OPTIMIZATION_DISABLE |
			  GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);

	/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
	WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
			  GEN9_CCS_TLB_PREFETCH_ENABLE);

	/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);

	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
	 * both tied to WaForceContextSaveRestoreNonCoherent
	 * in some hsds for skl. We keep the tie for all gen9. The
	 * documentation is a bit hazy and so we want to get common behaviour,
	 * even though there is no clear evidence we would need both on kbl/bxt.
	 * This area has been source of system hangs so we play it safe
	 * and mimic the skl regardless of what bspec says.
	 *
	 * Use Force Non-Coherent whenever executing a 3D context. This
	 * is a workaround for a possible hang in the unlikely event
	 * a TLB invalidation occurs during a PSD flush.
	 */

	/* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_NON_COHERENT);

	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
	if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915))
		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
				  GEN8_SAMPLER_POWER_BYPASS_DIS);

	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

	/*
	 * Supporting preemption with fine-granularity requires changes in the
	 * batch buffer programming. Since we can't break old userspace, we
	 * need to set our default preemption level to safe value. Userspace is
	 * still able to use more fine-grained preemption levels, since in
	 * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
	 * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
	 * not real HW workarounds, but merely a way to start using preemption
	 * while maintaining old contract with userspace.
	 */

	/* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

	/* WaDisableGPGPUMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

	/* WaClearHIZ_WM_CHICKEN3:bxt,glk */
	if (IS_GEN9_LP(i915))
		WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
}

static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
				struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;
	u8 vals[3] = { 0, 0, 0 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		u8 ss;

		/*
		 * Only consider slices where one, and only one, subslice has 7
		 * EUs
		 */
		if (!is_power_of_2(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]))
			continue;

		/*
		 * subslice_7eu[i] != 0 (because of the check above) and
		 * ss_max == 4 (maximum number of subslices possible per slice)
		 *
		 * ->    0 <= ss <= 3;
		 */
		ss = ffs(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]) - 1;
		vals[i] = 3 - ss;
	}

	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
		return;

	/* Tune IZ hashing. See intel_device_info_runtime_init() */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN9_IZ_HASHING_MASK(2) |
			    GEN9_IZ_HASHING_MASK(1) |
			    GEN9_IZ_HASHING_MASK(0),
			    GEN9_IZ_HASHING(2, vals[2]) |
			    GEN9_IZ_HASHING(1, vals[1]) |
			    GEN9_IZ_HASHING(0, vals[0]));
}

static void skl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);
	skl_tune_iz_hashing(engine, wal);
}

static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:bxt */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  STALL_DOP_GATING_DISABLE);

	/* WaToEnableHwFixForPushConstHWBug:bxt */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}

static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:kbl */
	if (IS_KBL_REVID(i915, KBL_REVID_C0, REVID_FOREVER))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableSbeCacheDispatchPortSharing:kbl */
	WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
			  GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}

static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:glk */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}

static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:cfl */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableSbeCacheDispatchPortSharing:cfl */
	WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
			  GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}

static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	/* WaForceContextSaveRestoreNonCoherent:cnl */
	WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);

	/* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */
	if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
		WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);

	/* WaDisableReplayBufferBankArbitrationOptimization:cnl */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
	if (IS_CNL_REVID(i915, 0, CNL_REVID_B0))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);

	/* WaPushConstantDereferenceHoldDisable:cnl */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);

	/* FtrEnableFastAnisoL1BankingFix:cnl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);

	/* WaDisable3DMidCmdPreemption:cnl */
	WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

	/* WaDisableGPGPUMidCmdPreemption:cnl */
	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

	/* WaDisableEarlyEOT:cnl */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
}

static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	/* WaDisableBankHangMode:icl */
	wa_write(wal,
		 GEN8_L3CNTLREG,
		 intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) |
		 GEN8_ERRDETBCTRL);

	/* Wa_1604370585:icl (pre-prod)
	 * Formerly known as WaPushConstantDereferenceHoldDisable
	 */
	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
		WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
				  PUSH_CONSTANT_DEREF_DISABLE);

	/* WaForceEnableNonCoherent:icl
	 * This is not the same workaround as in early Gen9 platforms, where
	 * lacking this could cause system hangs, but coherency performance
	 * overhead is high and only a few compute workloads really need it
	 * (the register is whitelisted in hardware now, so UMDs can opt in
	 * for coherency if they have a good reason).
	 */
	WA_SET_BIT_MASKED(ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);

	/* Wa_2006611047:icl (pre-prod)
	 * Formerly known as WaDisableImprovedTdlClkGating
	 */
	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
		WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
				  GEN11_TDL_CLOCK_GATING_FIX_DISABLE);

	/* Wa_2006665173:icl (pre-prod) */
	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
		WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
				  GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);

	/* WaEnableFloatBlendOptimization:icl */
	wa_write_masked_or(wal,
			   GEN10_CACHE_MODE_SS,
			   0, /* write-only, so skip validation */
			   _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE));

	/* WaDisableGPGPUMidThreadPreemption:icl */
	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);

	/* allow headerless messages for preemptible GPGPU context */
	WA_SET_BIT_MASKED(GEN10_SAMPLER_MODE,
			  GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
}

static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	/* Wa_1409142259:tgl */
	WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
			  GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
}

static void
__intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
			   struct i915_wa_list *wal,
			   const char *name)
{
	struct drm_i915_private *i915 = engine->i915;

	if (engine->class != RENDER_CLASS)
		return;

	wa_init_start(wal, name, engine->name);

	if (IS_GEN(i915, 12))
		tgl_ctx_workarounds_init(engine, wal);
	else if (IS_GEN(i915, 11))
		icl_ctx_workarounds_init(engine, wal);
	else if (IS_CANNONLAKE(i915))
		cnl_ctx_workarounds_init(engine, wal);
	else if (IS_COFFEELAKE(i915))
		cfl_ctx_workarounds_init(engine, wal);
	else if (IS_GEMINILAKE(i915))
		glk_ctx_workarounds_init(engine, wal);
	else if (IS_KABYLAKE(i915))
		kbl_ctx_workarounds_init(engine, wal);
	else if (IS_BROXTON(i915))
		bxt_ctx_workarounds_init(engine, wal);
	else if (IS_SKYLAKE(i915))
		skl_ctx_workarounds_init(engine, wal);
	else if (IS_CHERRYVIEW(i915))
		chv_ctx_workarounds_init(engine, wal);
	else if (IS_BROADWELL(i915))
		bdw_ctx_workarounds_init(engine, wal);
	else if (INTEL_GEN(i915) < 8)
		return;
	else
		MISSING_CASE(INTEL_GEN(i915));

	wa_init_finish(wal);
}

void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
{
	__intel_engine_init_ctx_wa(engine, &engine->ctx_wa_list, "context");
}

int intel_engine_emit_ctx_wa(struct i915_request *rq)
{
	struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
	struct i915_wa *wa;
	unsigned int i;
	u32 *cs;
	int ret;

	if (wal->count == 0)
		return 0;

	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
	if (ret)
		return ret;

	cs = intel_ring_begin(rq, (wal->count * 2 + 2));
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(wal->count);
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		*cs++ = i915_mmio_reg_offset(wa->reg);
		*cs++ = wa->val;
	}
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}
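
/*
 * For reference, the stream built above for a list of N context workarounds
 * is 2 * N + 2 dwords:
 *
 *	MI_LOAD_REGISTER_IMM(N)
 *	[offset(reg 0)] [val 0]
 *	...
 *	[offset(reg N-1)] [val N-1]
 *	MI_NOOP
 */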

static void
gen9_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	/* WaDisableKillLogic:bxt,skl,kbl */
	if (!IS_COFFEELAKE(i915))
		wa_write_or(wal,
			    GAM_ECOCHK,
			    ECOCHK_DIS_TLB);

	if (HAS_LLC(i915)) {
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
		 *
		 * Must match Display Engine. See
		 * WaCompressedResourceDisplayNewHashMode.
		 */
		wa_write_or(wal,
			    MMCD_MISC_CTRL,
			    MMCD_PCLA | MMCD_HOTSPOT_EN);
	}

	/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
	wa_write_or(wal,
		    GAM_ECOCHK,
		    BDW_DISABLE_HDC_INVALIDATION);
}

static void
skl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);

	/* WaDisableGafsUnitClkGating:skl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:skl */
	if (IS_SKL_REVID(i915, SKL_REVID_H0, REVID_FOREVER))
		wa_write_or(wal,
			    GEN9_GAMT_ECO_REG_RW_IA,
			    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
bxt_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);

	/* WaInPlaceDecompressionHang:bxt */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
kbl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);

	/* WaDisableDynamicCreditSharing:kbl */
	if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
		wa_write_or(wal,
			    GAMT_CHKN_BIT_REG,
			    GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);

	/* WaDisableGafsUnitClkGating:kbl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:kbl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
glk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);
}

static void
cfl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);

	/* WaDisableGafsUnitClkGating:cfl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:cfl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
	unsigned int slice, subslice;
	u32 l3_en, mcr, mcr_mask;

	GEM_BUG_ON(INTEL_GEN(i915) < 10);

	/*
	 * WaProgramMgsrForL3BankSpecificMmioReads:cnl,icl
	 * L3Banks could be fused off in single slice scenario. If that is
	 * the case, we might need to program MCR select to a valid L3Bank
	 * by default, to make sure we correctly read certain registers
	 * later on (in the range 0xB100 - 0xB3FF).
	 *
	 * WaProgramMgsrForCorrectSliceSpecificMmioReads:cnl,icl
	 * Before any MMIO read into slice/subslice specific registers, MCR
	 * packet control register needs to be programmed to point to any
	 * enabled s/ss pair. Otherwise, incorrect values will be returned.
	 * This means each subsequent MMIO read will be forwarded to a
	 * specific s/ss combination, but this is OK since these registers
	 * are consistent across s/ss in almost all cases. In the rare
	 * occasions, such as INSTDONE, where this value is dependent
	 * on s/ss combo, the read should be done with read_subslice_reg.
	 *
	 * Since GEN8_MCR_SELECTOR contains dual-purpose bits which select both
	 * to which subslice, or to which L3 bank, the respective mmio reads
	 * will go, we have to find a common index which works for both
	 * accesses.
	 *
	 * Case where we cannot find a common index fortunately should not
	 * happen in production hardware, so we only emit a warning instead of
	 * implementing something more complex that requires checking the range
	 * of every MMIO read.
	 */

	if (INTEL_GEN(i915) >= 10 && is_power_of_2(sseu->slice_mask)) {
		u32 l3_fuse =
			intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3) &
			GEN10_L3BANK_MASK;

		DRM_DEBUG_DRIVER("L3 fuse = %x\n", l3_fuse);
		l3_en = ~(l3_fuse << GEN10_L3BANK_PAIR_COUNT | l3_fuse);
	} else {
		l3_en = ~0;
	}

	slice = fls(sseu->slice_mask) - 1;
	subslice = fls(l3_en & intel_sseu_get_subslices(sseu, slice));
	if (!subslice) {
		DRM_WARN("No common index found between subslice mask %x and L3 bank mask %x!\n",
			 intel_sseu_get_subslices(sseu, slice), l3_en);
		subslice = fls(l3_en);
		WARN_ON(!subslice);
	}
	subslice--;

	if (INTEL_GEN(i915) >= 11) {
		mcr = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
		mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
	} else {
		mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
		mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
	}

	DRM_DEBUG_DRIVER("MCR slice/subslice = %x\n", mcr);

	wa_write_masked_or(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
}
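
/*
 * Worked example for the index selection above (made-up fuse values, for
 * illustration only): with slice_mask == 0x1 and l3_en == 0xf,
 * slice = fls(0x1) - 1 = 0, and subslice is fls() - 1 of the highest
 * enabled subslice of slice 0 that also indexes an enabled L3 bank; the
 * two indices are then packed into GEN8_MCR_SELECTOR by the masked write.
 */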

static void
cnl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	wa_init_mcr(i915, wal);

	/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
	if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
		wa_write_or(wal,
			    GAMT_CHKN_BIT_REG,
			    GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);

	/* WaInPlaceDecompressionHang:cnl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	wa_init_mcr(i915, wal);

	/* WaInPlaceDecompressionHang:icl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

	/* WaModifyGamTlbPartitioning:icl */
	wa_write_masked_or(wal,
			   GEN11_GACB_PERF_CTRL,
			   GEN11_HASH_CTRL_MASK,
			   GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);

	/* Wa_1405766107:icl
	 * Formerly known as WaCL2SFHalfMaxAlloc
	 */
	wa_write_or(wal,
		    GEN11_LSN_UNSLCVC,
		    GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
		    GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);

	/* Wa_220166154:icl
	 * Formerly known as WaDisCtxReload
	 */
	wa_write_or(wal,
		    GEN8_GAMW_ECO_DEV_RW_IA,
		    GAMW_ECO_DEV_CTX_RELOAD_DISABLE);

	/* Wa_1405779004:icl (pre-prod) */
	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
		wa_write_or(wal,
			    SLICE_UNIT_LEVEL_CLKGATE,
			    MSCUNIT_CLKGATE_DIS);

	/* Wa_1406680159:icl */
	wa_write_or(wal,
		    SUBSLICE_UNIT_LEVEL_CLKGATE,
		    GWUNIT_CLKGATE_DIS);

	/* Wa_1406838659:icl (pre-prod) */
	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
		wa_write_or(wal,
			    INF_UNIT_LEVEL_CLKGATE,
			    CGPSF_CLKGATE_DIS);

	/* Wa_1406463099:icl
	 * Formerly known as WaGamTlbPendError
	 */
	wa_write_or(wal,
		    GAMT_CHKN_BIT_REG,
		    GAMT_CHKN_DISABLE_L3_COH_PIPE);

	/* Wa_1607087056:icl */
	wa_write_or(wal,
		    SLICE_UNIT_LEVEL_CLKGATE,
		    L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
}

static void
tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
}

static void
gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	if (IS_GEN(i915, 12))
		tgl_gt_workarounds_init(i915, wal);
	else if (IS_GEN(i915, 11))
		icl_gt_workarounds_init(i915, wal);
	else if (IS_CANNONLAKE(i915))
		cnl_gt_workarounds_init(i915, wal);
	else if (IS_COFFEELAKE(i915))
		cfl_gt_workarounds_init(i915, wal);
	else if (IS_GEMINILAKE(i915))
		glk_gt_workarounds_init(i915, wal);
	else if (IS_KABYLAKE(i915))
		kbl_gt_workarounds_init(i915, wal);
	else if (IS_BROXTON(i915))
		bxt_gt_workarounds_init(i915, wal);
	else if (IS_SKYLAKE(i915))
		skl_gt_workarounds_init(i915, wal);
	else if (INTEL_GEN(i915) <= 8)
		return;
	else
		MISSING_CASE(INTEL_GEN(i915));
}

void intel_gt_init_workarounds(struct drm_i915_private *i915)
{
	struct i915_wa_list *wal = &i915->gt_wa_list;

	wa_init_start(wal, "GT", "global");
	gt_init_workarounds(i915, wal);
	wa_init_finish(wal);
}
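
/*
 * The list built here is (re)applied by intel_gt_apply_workarounds() below
 * whenever these registers lose their values (GPU reset, suspend/resume),
 * and wa_list_apply() additionally reads each register back under
 * CONFIG_DRM_I915_DEBUG_GEM to catch writes that did not stick.
 */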

static enum forcewake_domains
wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
{
	enum forcewake_domains fw = 0;
	struct i915_wa *wa;
	unsigned int i;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		fw |= intel_uncore_forcewake_for_reg(uncore,
						     wa->reg,
						     FW_REG_READ |
						     FW_REG_WRITE);

	return fw;
}

static bool
wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
{
	if ((cur ^ wa->val) & wa->read) {
		DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x, mask=%x)\n",
			  name, from, i915_mmio_reg_offset(wa->reg),
			  cur, cur & wa->read,
			  wa->val, wa->mask);

		return false;
	}

	return true;
}
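
/*
 * Only the bits in wa->read take part in the comparison above: e.g. with
 * val == 0x0004 and read == 0x0004, a current value of 0xfff4 passes
 * ((0xfff4 ^ 0x0004) & 0x0004 == 0) while 0xfff0 fails.
 */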

static void
wa_list_apply(struct intel_uncore *uncore, const struct i915_wa_list *wal)
{
	enum forcewake_domains fw;
	unsigned long flags;
	struct i915_wa *wa;
	unsigned int i;

	if (!wal->count)
		return;

	fw = wal_get_fw_for_rmw(uncore, wal);

	spin_lock_irqsave(&uncore->lock, flags);
	intel_uncore_forcewake_get__locked(uncore, fw);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		intel_uncore_rmw_fw(uncore, wa->reg, wa->mask, wa->val);
		if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
			wa_verify(wa,
				  intel_uncore_read_fw(uncore, wa->reg),
				  wal->name, "application");
	}

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock_irqrestore(&uncore->lock, flags);
}

void intel_gt_apply_workarounds(struct intel_gt *gt)
{
	wa_list_apply(gt->uncore, &gt->i915->gt_wa_list);
}

static bool wa_list_verify(struct intel_uncore *uncore,
			   const struct i915_wa_list *wal,
			   const char *from)
{
	struct i915_wa *wa;
	unsigned int i;
	bool ok = true;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		ok &= wa_verify(wa,
				intel_uncore_read(uncore, wa->reg),
				wal->name, from);

	return ok;
}

bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
{
	return wa_list_verify(gt->uncore, &gt->i915->gt_wa_list, from);
}

static inline bool is_nonpriv_flags_valid(u32 flags)
{
	/* Check only valid flag bits are set */
	if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID)
		return false;

	/* NB: Only 3 out of 4 enum values are valid for access field */
	if ((flags & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	    RING_FORCE_TO_NONPRIV_ACCESS_INVALID)
		return false;

	return true;
}

static void
whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
{
	struct i915_wa wa = {
		.reg = reg
	};

	if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
		return;

	if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))
		return;

	wa.reg.reg |= flags;
	_wa_add(wal, &wa);
}
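
/*
 * The access/range flags end up folded into the register offset itself
 * (wa.reg.reg |= flags above), so a whitelist entry carries both the mmio
 * address and its RING_FORCE_TO_NONPRIV_* mode in the single dword that
 * intel_engine_apply_whitelist() later writes into a NONPRIV slot.
 */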

static void
whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
{
	whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);
}

static void gen9_whitelist_build(struct i915_wa_list *w)
{
	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
	whitelist_reg(w, GEN9_CTX_PREEMPT_REG);

	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
	whitelist_reg(w, GEN8_CS_CHICKEN1);

	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
	whitelist_reg(w, GEN8_HDC_CHICKEN1);

	/* WaSendPushConstantsFromMMIO:skl,bxt */
	whitelist_reg(w, COMMON_SLICE_CHICKEN2);
}

static void skl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/* WaDisableLSQCROPERFforOCL:skl */
	whitelist_reg(w, GEN8_L3SQCREG4);
}

static void bxt_whitelist_build(struct intel_engine_cs *engine)
{
	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(&engine->whitelist);
}

static void kbl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/* WaDisableLSQCROPERFforOCL:kbl */
	whitelist_reg(w, GEN8_L3SQCREG4);
}

static void glk_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
	whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
}

static void cfl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/*
	 * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml
	 *
	 * This covers 4 registers which are next to one another :
	 *   - PS_INVOCATION_COUNT
	 *   - PS_INVOCATION_COUNT_UDW
	 *   - PS_DEPTH_COUNT
	 *   - PS_DEPTH_COUNT_UDW
	 */
	whitelist_reg_ext(w, PS_INVOCATION_COUNT,
			  RING_FORCE_TO_NONPRIV_ACCESS_RD |
			  RING_FORCE_TO_NONPRIV_RANGE_4);
}

static void cnl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	/* WaEnablePreemptionGranularityControlByUMD:cnl */
	whitelist_reg(w, GEN8_CS_CHICKEN1);
}

static void icl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	switch (engine->class) {
	case RENDER_CLASS:
		/* WaAllowUMDToModifyHalfSliceChicken7:icl */
		whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);

		/* WaAllowUMDToModifySamplerMode:icl */
		whitelist_reg(w, GEN10_SAMPLER_MODE);

		/* WaEnableStateCacheRedirectToCS:icl */
		whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);

		/*
		 * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl
		 *
		 * This covers 4 registers which are next to one another :
		 *   - PS_INVOCATION_COUNT
		 *   - PS_INVOCATION_COUNT_UDW
		 *   - PS_DEPTH_COUNT
		 *   - PS_DEPTH_COUNT_UDW
		 */
		whitelist_reg_ext(w, PS_INVOCATION_COUNT,
				  RING_FORCE_TO_NONPRIV_ACCESS_RD |
				  RING_FORCE_TO_NONPRIV_RANGE_4);
		break;

	case VIDEO_DECODE_CLASS:
		/* hucStatusRegOffset */
		whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		/* hucUKernelHdrInfoRegOffset */
		whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		/* hucStatus2RegOffset */
		whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		break;

	default:
		break;
	}
}

static void tgl_whitelist_build(struct intel_engine_cs *engine)
{
}

void intel_engine_init_whitelist(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_wa_list *w = &engine->whitelist;

	wa_init_start(w, "whitelist", engine->name);

	if (IS_GEN(i915, 12))
		tgl_whitelist_build(engine);
	else if (IS_GEN(i915, 11))
		icl_whitelist_build(engine);
	else if (IS_CANNONLAKE(i915))
		cnl_whitelist_build(engine);
	else if (IS_COFFEELAKE(i915))
		cfl_whitelist_build(engine);
	else if (IS_GEMINILAKE(i915))
		glk_whitelist_build(engine);
	else if (IS_KABYLAKE(i915))
		kbl_whitelist_build(engine);
	else if (IS_BROXTON(i915))
		bxt_whitelist_build(engine);
	else if (IS_SKYLAKE(i915))
		skl_whitelist_build(engine);
	else if (INTEL_GEN(i915) <= 8)
		return;
	else
		MISSING_CASE(INTEL_GEN(i915));

	wa_init_finish(w);
}

void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
{
	const struct i915_wa_list *wal = &engine->whitelist;
	struct intel_uncore *uncore = engine->uncore;
	const u32 base = engine->mmio_base;
	struct i915_wa *wa;
	unsigned int i;

	if (!wal->count)
		return;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		intel_uncore_write(uncore,
				   RING_FORCE_TO_NONPRIV(base, i),
				   i915_mmio_reg_offset(wa->reg));

	/* And clear the rest just in case of garbage */
	for (; i < RING_MAX_NONPRIV_SLOTS; i++)
		intel_uncore_write(uncore,
				   RING_FORCE_TO_NONPRIV(base, i),
				   i915_mmio_reg_offset(RING_NOPID(base)));
}

static void
rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) {
		/* Wa_1606700617:tgl */
		wa_masked_en(wal,
			     GEN9_CS_DEBUG_MODE1,
			     FF_DOP_CLOCK_GATE_DISABLE);
	}

	if (IS_GEN(i915, 11)) {
		/* This is not a Wa. Enable for better image quality */
		wa_masked_en(wal,
			     _3D_CHICKEN3,
			     _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);

		/* WaPipelineFlushCoherentLines:icl */
		wa_write_or(wal,
			    GEN8_L3SQCREG4,
			    GEN8_LQSC_FLUSH_COHERENT_LINES);

		/*
		 * Wa_1405543622:icl
		 * Formerly known as WaGAPZPriorityScheme
		 */
		wa_write_or(wal,
			    GEN8_GARBCNTL,
			    GEN11_ARBITRATION_PRIO_ORDER_MASK);

		/*
		 * Wa_1604223664:icl
		 * Formerly known as WaL3BankAddressHashing
		 */
		wa_write_masked_or(wal,
				   GEN8_GARBCNTL,
				   GEN11_HASH_CTRL_EXCL_MASK,
				   GEN11_HASH_CTRL_EXCL_BIT0);
		wa_write_masked_or(wal,
				   GEN11_GLBLINVL,
				   GEN11_BANK_HASH_ADDR_EXCL_MASK,
				   GEN11_BANK_HASH_ADDR_EXCL_BIT0);

		/*
		 * Wa_1405733216:icl
		 * Formerly known as WaDisableCleanEvicts
		 */
		wa_write_or(wal,
			    GEN8_L3SQCREG4,
			    GEN11_LQSC_CLEAN_EVICT_DISABLE);

		/* WaForwardProgressSoftReset:icl */
		wa_write_or(wal,
			    GEN10_SCRATCH_LNCF2,
			    PMFLUSHDONE_LNICRSDROP |
			    PMFLUSH_GAPL3UNBLOCK |
			    PMFLUSHDONE_LNEBLK);

		/* Wa_1406609255:icl (pre-prod) */
		if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
			wa_write_or(wal,
				    GEN7_SARCHKMD,
				    GEN7_DISABLE_DEMAND_PREFETCH);

		/* Wa_1606682166:icl */
		wa_write_or(wal,
			    GEN7_SARCHKMD,
			    GEN7_DISABLE_SAMPLER_PREFETCH);

		/* Wa_1409178092:icl */
		wa_write_masked_or(wal,
				   GEN11_SCRATCH2,
				   GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
				   0);
	}

	if (IS_GEN_RANGE(i915, 9, 11)) {
		/* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl */
		wa_masked_en(wal,
			     GEN7_FF_SLICE_CS_CHICKEN1,
			     GEN9_FFSC_PERCTX_PREEMPT_CTRL);
	}

	if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
		/* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
		wa_write_or(wal,
			    GEN8_GARBCNTL,
			    GEN9_GAPS_TSV_CREDIT_DISABLE);
	}

	if (IS_BROXTON(i915)) {
		/* WaDisablePooledEuLoadBalancingFix:bxt */
		wa_masked_en(wal,
			     FF_SLICE_CS_CHICKEN2,
			     GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
	}

	if (IS_GEN(i915, 9)) {
		/* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
		wa_masked_en(wal,
			     GEN9_CSFE_CHICKEN1_RCS,
			     GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);

		/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
		wa_write_or(wal,
			    BDW_SCRATCH1,
			    GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

		/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
		if (IS_GEN9_LP(i915))
			wa_write_masked_or(wal,
					   GEN8_L3SQCREG1,
					   L3_PRIO_CREDITS_MASK,
					   L3_GENERAL_PRIO_CREDITS(62) |
					   L3_HIGH_PRIO_CREDITS(2));

		/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
		wa_write_or(wal,
			    GEN8_L3SQCREG4,
			    GEN8_LQSC_FLUSH_COHERENT_LINES);
	}
}

static void
xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	/* WaKBLVECSSemaphoreWaitPoll:kbl */
	if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
		wa_write(wal,
			 RING_SEMA_WAIT_POLL(engine->mmio_base),
			 1);
	}
}

static void
engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8))
		return;

	if (engine->class == RENDER_CLASS)
		rcs_engine_wa_init(engine, wal);
	else
		xcs_engine_wa_init(engine, wal);
}

void intel_engine_init_workarounds(struct intel_engine_cs *engine)
{
	struct i915_wa_list *wal = &engine->wa_list;

	if (INTEL_GEN(engine->i915) < 8)
		return;

	wa_init_start(wal, "engine", engine->name);
	engine_init_workarounds(engine, wal);
	wa_init_finish(wal);
}

void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
{
	wa_list_apply(engine->uncore, &engine->wa_list);
}

static struct i915_vma *
create_scratch(struct i915_address_space *vm, int count)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned int size;
	int err;

	size = round_up(count * sizeof(u32), PAGE_SIZE);
	obj = i915_gem_object_create_internal(vm->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0,
			   i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
	if (err)
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static bool mcr_range(struct drm_i915_private *i915, u32 offset)
{
	/*
	 * Registers in this range are affected by the MCR selector
	 * which only controls CPU initiated MMIO. Routing does not
	 * work for CS access so we cannot verify them on this path.
	 */
	if (INTEL_GEN(i915) >= 8 && (offset >= 0xb000 && offset <= 0xb4ff))
		return true;

	return false;
}

static int
wa_list_srm(struct i915_request *rq,
	    const struct i915_wa_list *wal,
	    struct i915_vma *vma)
{
	struct drm_i915_private *i915 = rq->i915;
	unsigned int i, count = 0;
	const struct i915_wa *wa;
	u32 srm, *cs;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (INTEL_GEN(i915) >= 8)
		srm++;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		if (!mcr_range(i915, i915_mmio_reg_offset(wa->reg)))
			count++;
	}

	cs = intel_ring_begin(rq, 4 * count);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		u32 offset = i915_mmio_reg_offset(wa->reg);

		if (mcr_range(i915, offset))
			continue;

		*cs++ = srm;
		*cs++ = offset;
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	return 0;
}
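
/*
 * Each register not skipped by mcr_range() takes four dwords above: the
 * SRM opcode, the register offset, and the GGTT destination (the trailing
 * zero being the upper address half on gen8+). The destination is indexed
 * by the workaround's position i rather than by count, so skipped MCR
 * entries simply leave holes in the scratch buffer and
 * engine_wa_list_verify() below can index results[] the same way.
 */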

static int engine_wa_list_verify(struct intel_context *ce,
				 const struct i915_wa_list * const wal,
				 const char *from)
{
	const struct i915_wa *wa;
	struct i915_request *rq;
	struct i915_vma *vma;
	unsigned int i;
	u32 *results;
	int err;

	if (!wal->count)
		return 0;

	vma = create_scratch(&ce->engine->gt->ggtt->vm, wal->count);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_vma;
	}

	err = wa_list_srm(rq, wal, vma);
	if (err)
		goto err_vma;

	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
		err = -ETIME;
		goto err_vma;
	}

	results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(results)) {
		err = PTR_ERR(results);
		goto err_vma;
	}

	err = 0;
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		if (mcr_range(rq->i915, i915_mmio_reg_offset(wa->reg)))
			continue;

		if (!wa_verify(wa, results[i], wal->name, from))
			err = -ENXIO;
	}

	i915_gem_object_unpin_map(vma->obj);

err_vma:
	i915_vma_unpin(vma);
	i915_vma_put(vma);
	return err;
}

int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
				    const char *from)
{
	return engine_wa_list_verify(engine->kernel_context,
				     &engine->wa_list,
				     from);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_workarounds.c"
#endif