/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2018 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_ring.h"
#include "intel_workarounds.h"

/**
 * DOC: Hardware workarounds
 *
 * This file is intended as a central place to implement most [1]_ of the
 * required workarounds for hardware to work as originally intended. They fall
 * into five basic categories depending on how/when they are applied:
 *
 * - Workarounds that touch registers that are saved/restored to/from the HW
 *   context image. The list is emitted (via Load Register Immediate commands)
 *   every time a new context is created.
 * - GT workarounds. The list of these WAs is applied whenever these registers
 *   revert to default values (on GPU reset, suspend/resume [2]_, etc..).
 * - Display workarounds. The list is applied during display clock-gating
 *   initialization.
 * - Workarounds that whitelist a privileged register, so that UMDs can manage
 *   them directly. This is just a special case of an MMIO workaround (as we
 *   write the list of these to-be-whitelisted registers to some special HW
 *   registers).
 * - Workaround batchbuffers, that get executed automatically by the hardware
 *   on every HW context restore.
 *
 * .. [1] Please notice that there are other WAs that, due to their nature,
 *    cannot be applied from a central place. Those are peppered around the rest
 *    of the code, as needed.
 *
 * .. [2] Technically, some registers are power context saved & restored, so they
 *    survive a suspend/resume. In practice, writing them again is not too
 *    costly and simplifies things. We can revisit this in the future.
 *
 * Layout
 * ~~~~~~
 *
 * Keep things in this file ordered by WA type, as per the above (context, GT,
 * display, register whitelist, batchbuffer). Then, inside each type, keep the
 * following order:
 *
 * - Infrastructure functions and macros
 * - WAs per platform in standard gen/chrono order
 * - Public functions to init or apply the given workaround type.
 */
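
/*
 * Example (illustrative sketch only, not called anywhere in the driver):
 * every workaround list below follows the same lifecycle. A hypothetical
 * platform hook would record its entries between wa_init_start() and
 * wa_init_finish(), using the helpers defined further down:
 *
 *	static void example_wa_init(struct i915_wa_list *wal)
 *	{
 *		wa_init_start(wal, "example", "rcs0");
 *		wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
 *		wa_init_finish(wal);
 *	}
 */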

static void wa_init_start(struct i915_wa_list *wal, const char *name, const char *engine_name)
{
	wal->name = name;
	wal->engine_name = engine_name;
}

#define WA_LIST_CHUNK (1 << 4)

static void wa_init_finish(struct i915_wa_list *wal)
{
	/* Trim unused entries. */
	if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
		struct i915_wa *list = kmemdup(wal->list,
					       wal->count * sizeof(*list),
					       GFP_KERNEL);

		if (list) {
			kfree(wal->list);
			wal->list = list;
		}
	}

	if (!wal->count)
		return;

	DRM_DEBUG_DRIVER("Initialized %u %s workarounds on %s\n",
			 wal->wa_count, wal->name, wal->engine_name);
}

static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
{
	unsigned int addr = i915_mmio_reg_offset(wa->reg);
	unsigned int start = 0, end = wal->count;
	const unsigned int grow = WA_LIST_CHUNK;
	struct i915_wa *wa_;

	GEM_BUG_ON(!is_power_of_2(grow));

	if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
		struct i915_wa *list;

		list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
				     GFP_KERNEL);
		if (!list) {
			DRM_ERROR("No space for workaround init!\n");
			return;
		}

		if (wal->list)
			memcpy(list, wal->list, sizeof(*wa) * wal->count);

		wal->list = list;
	}

	while (start < end) {
		unsigned int mid = start + (end - start) / 2;

		if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) {
			start = mid + 1;
		} else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) {
			end = mid;
		} else {
			wa_ = &wal->list[mid];

			if ((wa->mask & ~wa_->mask) == 0) {
				DRM_ERROR("Discarding overwritten w/a for reg %04x (mask: %08x, value: %08x)\n",
					  i915_mmio_reg_offset(wa_->reg),
					  wa_->mask, wa_->val);

				wa_->val &= ~wa->mask;
			}

			wal->wa_count++;
			wa_->val |= wa->val;
			wa_->mask |= wa->mask;
			wa_->read |= wa->read;
			return;
		}
	}

	wal->wa_count++;
	wa_ = &wal->list[wal->count++];
	*wa_ = *wa;

	while (wa_-- > wal->list) {
		GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) ==
			   i915_mmio_reg_offset(wa_[1].reg));
		if (i915_mmio_reg_offset(wa_[1].reg) >
		    i915_mmio_reg_offset(wa_[0].reg))
			break;

		swap(wa_[1], wa_[0]);
	}
}

static void wa_add(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
		   u32 val, u32 read_mask)
{
	struct i915_wa wa = {
		.reg  = reg,
		.mask = mask,
		.val  = val,
		.read = read_mask,
	};

	_wa_add(wal, &wa);
}

static void
wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
		   u32 val)
{
	wa_add(wal, reg, mask, val, mask);
}

static void
wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
	wa_write_masked_or(wal, reg, val, _MASKED_BIT_ENABLE(val));
}

static void
wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
	wa_write_masked_or(wal, reg, ~0, val);
}

static void
wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
	wa_write_masked_or(wal, reg, val, val);
}

#define WA_SET_BIT_MASKED(addr, mask) \
	wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_ENABLE(mask))

#define WA_CLR_BIT_MASKED(addr, mask) \
	wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_DISABLE(mask))

#define WA_SET_FIELD_MASKED(addr, mask, value) \
	wa_write_masked_or(wal, (addr), (mask), _MASKED_FIELD((mask), (value)))
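
/*
 * Worked example (sketch): "masked" registers interpret the upper 16 bits
 * of a write as a per-bit write enable for the lower 16 bits. Since
 * _MASKED_BIT_ENABLE(b) expands to ((b) << 16 | (b)),
 *
 *	WA_SET_BIT_MASKED(HDC_CHICKEN0, HDC_FORCE_NON_COHERENT)
 *
 * records a value that, when later written via the LRI list, sets only
 * that bit and leaves every other bit of the register untouched, so no
 * read-modify-write is needed for these registers.
 */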

static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisableAsyncFlipPerfMode:bdw,chv */
	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);

	/* WaDisablePartialInstShootdown:bdw,chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:bdw,chv */
	/* WaHdcDisableFetchWhenMasked:bdw,chv */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_DONOT_FETCH_MEM_WHEN_MASKED |
			  HDC_FORCE_NON_COHERENT);

	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
	 *  polygons in the same 8x4 pixel/sample area to be processed without
	 *  stalling waiting for the earlier ones to write to Hierarchical Z
	 *  buffer."
	 *
	 * This optimization is off by default for BDW and CHV; turn it on.
	 */
	WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw,chv */
	WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN6_WIZ_HASHING_MASK,
			    GEN6_WIZ_HASHING_16x4);
}

static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	gen8_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* WaDisableDopClockGating:bdw
	 *
	 * Also see the related UCGTCL1 write in bdw_init_clock_gating()
	 * to disable EUTC clock gating.
	 */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
			  DOP_CLOCK_GATING_DISABLE);

	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN8_SAMPLER_POWER_BYPASS_DIS);

	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  /* WaForceContextSaveRestoreNonCoherent:bdw */
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
			  (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
}

static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen8_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* Improve HiZ throughput on CHV. */
	WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
}

static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	if (HAS_LLC(i915)) {
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
		 *
		 * Must match Display Engine. See
		 * WaCompressedResourceDisplayNewHashMode.
		 */
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN9_PBE_COMPRESSED_HASH_SELECTION);
		WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
				  GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
	}

	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  FLOW_CONTROL_ENABLE |
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
			  GEN9_ENABLE_YV12_BUGFIX |
			  GEN9_ENABLE_GPGPU_PREEMPTION);

	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(CACHE_MODE_1,
			  GEN8_4x4_STC_OPTIMIZATION_DISABLE |
			  GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);

	/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
	WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
			  GEN9_CCS_TLB_PREFETCH_ENABLE);

	/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);

	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
	 * both tied to WaForceContextSaveRestoreNonCoherent
	 * in some hsds for skl. We keep the tie for all gen9. The
	 * documentation is a bit hazy and so we want to get common behaviour,
	 * even though there is no clear evidence we would need both on kbl/bxt.
	 * This area has been a source of system hangs so we play it safe
	 * and mimic the skl regardless of what bspec says.
	 *
	 * Use Force Non-Coherent whenever executing a 3D context. This
	 * is a workaround for a possible hang in the unlikely event
	 * a TLB invalidation occurs during a PSD flush.
	 */

	/* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_NON_COHERENT);

	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
	if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915))
		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
				  GEN8_SAMPLER_POWER_BYPASS_DIS);

	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

	/*
	 * Supporting preemption with fine-granularity requires changes in the
	 * batch buffer programming. Since we can't break old userspace, we
	 * need to set our default preemption level to safe value. Userspace is
	 * still able to use more fine-grained preemption levels, since in
	 * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
	 * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
	 * not real HW workarounds, but merely a way to start using preemption
	 * while maintaining old contract with userspace.
	 */

	/* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

	/* WaDisableGPGPUMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

	/* WaClearHIZ_WM_CHICKEN3:bxt,glk */
	if (IS_GEN9_LP(i915))
		WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
}

static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
				struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;
	u8 vals[3] = { 0, 0, 0 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		u8 ss;

		/*
		 * Only consider slices where one, and only one, subslice has 7
		 * EUs
		 */
		if (!is_power_of_2(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]))
			continue;

		/*
		 * subslice_7eu[i] != 0 (because of the check above) and
		 * ss_max == 4 (maximum number of subslices possible per slice)
		 *
		 * ->    0 <= ss <= 3;
		 */
		ss = ffs(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]) - 1;
		vals[i] = 3 - ss;
	}

	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
		return;

	/* Tune IZ hashing. See intel_device_info_runtime_init() */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN9_IZ_HASHING_MASK(2) |
			    GEN9_IZ_HASHING_MASK(1) |
			    GEN9_IZ_HASHING_MASK(0),
			    GEN9_IZ_HASHING(2, vals[2]) |
			    GEN9_IZ_HASHING(1, vals[1]) |
			    GEN9_IZ_HASHING(0, vals[0]));
}

static void skl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);
	skl_tune_iz_hashing(engine, wal);
}

static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:bxt */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  STALL_DOP_GATING_DISABLE);

	/* WaToEnableHwFixForPushConstHWBug:bxt */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}

static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:kbl */
	if (IS_KBL_REVID(i915, KBL_REVID_C0, REVID_FOREVER))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableSbeCacheDispatchPortSharing:kbl */
	WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
			  GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}

static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:glk */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}

static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:cfl */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableSbeCacheDispatchPortSharing:cfl */
	WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
			  GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}

static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	/* WaForceContextSaveRestoreNonCoherent:cnl */
	WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);

	/* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */
	if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
		WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);

	/* WaDisableReplayBufferBankArbitrationOptimization:cnl */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
	if (IS_CNL_REVID(i915, 0, CNL_REVID_B0))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);

	/* WaPushConstantDereferenceHoldDisable:cnl */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);

	/* FtrEnableFastAnisoL1BankingFix:cnl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);

	/* WaDisable3DMidCmdPreemption:cnl */
	WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

	/* WaDisableGPGPUMidCmdPreemption:cnl */
	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

	/* WaDisableEarlyEOT:cnl */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
}

static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	/* WaDisableBankHangMode:icl */
	wa_write(wal,
		 GEN8_L3CNTLREG,
		 intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) |
		 GEN8_ERRDETBCTRL);

	/* Wa_1604370585:icl (pre-prod)
	 * Formerly known as WaPushConstantDereferenceHoldDisable
	 */
	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
		WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
				  PUSH_CONSTANT_DEREF_DISABLE);

	/* WaForceEnableNonCoherent:icl
	 * This is not the same workaround as in early Gen9 platforms, where
	 * lacking this could cause system hangs, but coherency performance
	 * overhead is high and only a few compute workloads really need it
	 * (the register is whitelisted in hardware now, so UMDs can opt in
	 * for coherency if they have a good reason).
	 */
	WA_SET_BIT_MASKED(ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);

	/* Wa_2006611047:icl (pre-prod)
	 * Formerly known as WaDisableImprovedTdlClkGating
	 */
	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
		WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
				  GEN11_TDL_CLOCK_GATING_FIX_DISABLE);

	/* Wa_2006665173:icl (pre-prod) */
	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
		WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
				  GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);

	/* WaEnableFloatBlendOptimization:icl */
	wa_write_masked_or(wal,
			   GEN10_CACHE_MODE_SS,
			   0, /* write-only, so skip validation */
			   _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE));

	/* WaDisableGPGPUMidThreadPreemption:icl */
	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);

	/* allow headerless messages for preemptible GPGPU context */
	WA_SET_BIT_MASKED(GEN10_SAMPLER_MODE,
			  GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
}

static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	u32 val;

	/* Wa_1409142259:tgl */
	WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
			  GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);

	/* Wa_1604555607:tgl */
	val = intel_uncore_read(engine->uncore, FF_MODE2);
	val &= ~FF_MODE2_TDS_TIMER_MASK;
	val |= FF_MODE2_TDS_TIMER_128;
	/*
	 * FIXME: FF_MODE2 register is not readable till TGL B0. We can
	 * enable verification of WA from the later steppings, which enables
	 * the read of FF_MODE2.
	 */
	wa_add(wal, FF_MODE2, FF_MODE2_TDS_TIMER_MASK, val,
	       IS_TGL_REVID(engine->i915, TGL_REVID_A0, TGL_REVID_A0) ? 0 :
			    FF_MODE2_TDS_TIMER_MASK);
}

static void
__intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
			   struct i915_wa_list *wal,
			   const char *name)
{
	struct drm_i915_private *i915 = engine->i915;

	if (engine->class != RENDER_CLASS)
		return;

	wa_init_start(wal, name, engine->name);

	if (IS_GEN(i915, 12))
		tgl_ctx_workarounds_init(engine, wal);
	else if (IS_GEN(i915, 11))
		icl_ctx_workarounds_init(engine, wal);
	else if (IS_CANNONLAKE(i915))
		cnl_ctx_workarounds_init(engine, wal);
	else if (IS_COFFEELAKE(i915))
		cfl_ctx_workarounds_init(engine, wal);
	else if (IS_GEMINILAKE(i915))
		glk_ctx_workarounds_init(engine, wal);
	else if (IS_KABYLAKE(i915))
		kbl_ctx_workarounds_init(engine, wal);
	else if (IS_BROXTON(i915))
		bxt_ctx_workarounds_init(engine, wal);
	else if (IS_SKYLAKE(i915))
		skl_ctx_workarounds_init(engine, wal);
	else if (IS_CHERRYVIEW(i915))
		chv_ctx_workarounds_init(engine, wal);
	else if (IS_BROADWELL(i915))
		bdw_ctx_workarounds_init(engine, wal);
	else if (INTEL_GEN(i915) < 8)
		return;
	else
		MISSING_CASE(INTEL_GEN(i915));

	wa_init_finish(wal);
}

void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
{
	__intel_engine_init_ctx_wa(engine, &engine->ctx_wa_list, "context");
}

int intel_engine_emit_ctx_wa(struct i915_request *rq)
{
	struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
	struct i915_wa *wa;
	unsigned int i;
	u32 *cs;
	int ret;

	if (wal->count == 0)
		return 0;

	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
	if (ret)
		return ret;

	cs = intel_ring_begin(rq, (wal->count * 2 + 2));
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(wal->count);
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		*cs++ = i915_mmio_reg_offset(wa->reg);
		*cs++ = wa->val;
	}
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}
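
/*
 * For reference, the command stream emitted above for a two-entry list
 * looks like this (sketch):
 *
 *	MI_LOAD_REGISTER_IMM(2)
 *	<reg0 offset> <reg0 value>
 *	<reg1 offset> <reg1 value>
 *	MI_NOOP
 *
 * bracketed by the EMIT_BARRIER flushes so the register writes cannot be
 * reordered against the rest of the context setup.
 */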

static void
gen9_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	/* WaDisableKillLogic:bxt,skl,kbl */
	if (!IS_COFFEELAKE(i915))
		wa_write_or(wal,
			    GAM_ECOCHK,
			    ECOCHK_DIS_TLB);

	if (HAS_LLC(i915)) {
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
		 *
		 * Must match Display Engine. See
		 * WaCompressedResourceDisplayNewHashMode.
		 */
		wa_write_or(wal,
			    MMCD_MISC_CTRL,
			    MMCD_PCLA | MMCD_HOTSPOT_EN);
	}

	/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
	wa_write_or(wal,
		    GAM_ECOCHK,
		    BDW_DISABLE_HDC_INVALIDATION);
}

static void
skl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);

	/* WaDisableGafsUnitClkGating:skl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:skl */
	if (IS_SKL_REVID(i915, SKL_REVID_H0, REVID_FOREVER))
		wa_write_or(wal,
			    GEN9_GAMT_ECO_REG_RW_IA,
			    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
bxt_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);

	/* WaInPlaceDecompressionHang:bxt */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
kbl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);

	/* WaDisableDynamicCreditSharing:kbl */
	if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
		wa_write_or(wal,
			    GAMT_CHKN_BIT_REG,
			    GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);

	/* WaDisableGafsUnitClkGating:kbl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:kbl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
glk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);
}

static void
cfl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);

	/* WaDisableGafsUnitClkGating:cfl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:cfl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
	unsigned int slice, subslice;
	u32 l3_en, mcr, mcr_mask;

	GEM_BUG_ON(INTEL_GEN(i915) < 10);

	/*
	 * WaProgramMgsrForL3BankSpecificMmioReads: cnl,icl
	 * L3Banks could be fused off in single slice scenario. If that is
	 * the case, we might need to program MCR select to a valid L3Bank
	 * by default, to make sure we correctly read certain registers
	 * later on (in the range 0xB100 - 0xB3FF).
	 *
	 * WaProgramMgsrForCorrectSliceSpecificMmioReads:cnl,icl
	 * Before any MMIO read into slice/subslice specific registers, MCR
	 * packet control register needs to be programmed to point to any
	 * enabled s/ss pair. Otherwise, incorrect values will be returned.
	 * This means each subsequent MMIO read will be forwarded to a
	 * specific s/ss combination, but this is OK since these registers
	 * are consistent across s/ss in almost all cases. On the rare
	 * occasions, such as INSTDONE, where this value is dependent
	 * on s/ss combo, the read should be done with read_subslice_reg.
	 *
	 * Since GEN8_MCR_SELECTOR contains dual-purpose bits which select both
	 * to which subslice, or to which L3 bank, the respective mmio reads
	 * will go, we have to find a common index which works for both
	 * accesses.
	 *
	 * Case where we cannot find a common index fortunately should not
	 * happen in production hardware, so we only emit a warning instead of
	 * implementing something more complex that requires checking the range
	 * of every MMIO read.
	 */

	if (INTEL_GEN(i915) >= 10 && is_power_of_2(sseu->slice_mask)) {
		u32 l3_fuse =
			intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3) &
			GEN10_L3BANK_MASK;

		DRM_DEBUG_DRIVER("L3 fuse = %x\n", l3_fuse);
		l3_en = ~(l3_fuse << GEN10_L3BANK_PAIR_COUNT | l3_fuse);
	} else {
		l3_en = ~0;
	}

	slice = fls(sseu->slice_mask) - 1;
	subslice = fls(l3_en & intel_sseu_get_subslices(sseu, slice));
	if (!subslice) {
		DRM_WARN("No common index found between subslice mask %x and L3 bank mask %x!\n",
			 intel_sseu_get_subslices(sseu, slice), l3_en);
		subslice = fls(l3_en);
		drm_WARN_ON(&i915->drm, !subslice);
	}
	subslice--;

	if (INTEL_GEN(i915) >= 11) {
		mcr = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
		mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
	} else {
		mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
		mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
	}

	DRM_DEBUG_DRIVER("MCR slice/subslice = %x\n", mcr);

	wa_write_masked_or(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
}
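
/*
 * Worked example for wa_init_mcr() (hypothetical fuse values): on a
 * one-slice gen11 part with slice_mask = 0x1, a subslice mask of 0x3 on
 * slice 0 and all L3 banks enabled (l3_en = ~0), slice = fls(0x1) - 1 = 0
 * and subslice = fls(~0 & 0x3) - 1 = 1, so the list records
 * GEN11_MCR_SLICE(0) | GEN11_MCR_SUBSLICE(1) into GEN8_MCR_SELECTOR.
 */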

static void
cnl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	wa_init_mcr(i915, wal);

	/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
	if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
		wa_write_or(wal,
			    GAMT_CHKN_BIT_REG,
			    GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);

	/* WaInPlaceDecompressionHang:cnl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	wa_init_mcr(i915, wal);

	/* WaInPlaceDecompressionHang:icl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

	/* WaModifyGamTlbPartitioning:icl */
	wa_write_masked_or(wal,
			   GEN11_GACB_PERF_CTRL,
			   GEN11_HASH_CTRL_MASK,
			   GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);

	/* Wa_1405766107:icl
	 * Formerly known as WaCL2SFHalfMaxAlloc
	 */
	wa_write_or(wal,
		    GEN11_LSN_UNSLCVC,
		    GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
		    GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);

	/* Wa_220166154:icl
	 * Formerly known as WaDisCtxReload
	 */
	wa_write_or(wal,
		    GEN8_GAMW_ECO_DEV_RW_IA,
		    GAMW_ECO_DEV_CTX_RELOAD_DISABLE);

	/* Wa_1405779004:icl (pre-prod) */
	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
		wa_write_or(wal,
			    SLICE_UNIT_LEVEL_CLKGATE,
			    MSCUNIT_CLKGATE_DIS);

	/* Wa_1406680159:icl */
	wa_write_or(wal,
		    SUBSLICE_UNIT_LEVEL_CLKGATE,
		    GWUNIT_CLKGATE_DIS);

	/* Wa_1406838659:icl (pre-prod) */
	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
		wa_write_or(wal,
			    INF_UNIT_LEVEL_CLKGATE,
			    CGPSF_CLKGATE_DIS);

	/* Wa_1406463099:icl
	 * Formerly known as WaGamTlbPendError
	 */
	wa_write_or(wal,
		    GAMT_CHKN_BIT_REG,
		    GAMT_CHKN_DISABLE_L3_COH_PIPE);

	/* Wa_1607087056:icl */
	wa_write_or(wal,
		    SLICE_UNIT_LEVEL_CLKGATE,
		    L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
}

static void
tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	/* Wa_1409420604:tgl */
	if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
		wa_write_or(wal,
			    SUBSLICE_UNIT_LEVEL_CLKGATE2,
			    CPSSUNIT_CLKGATE_DIS);

	/* Wa_1409180338:tgl */
	if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
		wa_write_or(wal,
			    SLICE_UNIT_LEVEL_CLKGATE,
			    L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
}

static void
gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	if (IS_GEN(i915, 12))
		tgl_gt_workarounds_init(i915, wal);
	else if (IS_GEN(i915, 11))
		icl_gt_workarounds_init(i915, wal);
	else if (IS_CANNONLAKE(i915))
		cnl_gt_workarounds_init(i915, wal);
	else if (IS_COFFEELAKE(i915))
		cfl_gt_workarounds_init(i915, wal);
	else if (IS_GEMINILAKE(i915))
		glk_gt_workarounds_init(i915, wal);
	else if (IS_KABYLAKE(i915))
		kbl_gt_workarounds_init(i915, wal);
	else if (IS_BROXTON(i915))
		bxt_gt_workarounds_init(i915, wal);
	else if (IS_SKYLAKE(i915))
		skl_gt_workarounds_init(i915, wal);
	else if (INTEL_GEN(i915) <= 8)
		return;
	else
		MISSING_CASE(INTEL_GEN(i915));
}

void intel_gt_init_workarounds(struct drm_i915_private *i915)
{
	struct i915_wa_list *wal = &i915->gt_wa_list;

	wa_init_start(wal, "GT", "global");
	gt_init_workarounds(i915, wal);
	wa_init_finish(wal);
}

static enum forcewake_domains
wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
{
	enum forcewake_domains fw = 0;
	struct i915_wa *wa;
	unsigned int i;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		fw |= intel_uncore_forcewake_for_reg(uncore,
						     wa->reg,
						     FW_REG_READ |
						     FW_REG_WRITE);

	return fw;
}

static bool
wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
{
	if ((cur ^ wa->val) & wa->read) {
		DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x, mask=%x)\n",
			  name, from, i915_mmio_reg_offset(wa->reg),
			  cur, cur & wa->read,
			  wa->val, wa->mask);

		return false;
	}

	return true;
}

static void
wa_list_apply(struct intel_uncore *uncore, const struct i915_wa_list *wal)
{
	enum forcewake_domains fw;
	unsigned long flags;
	struct i915_wa *wa;
	unsigned int i;

	if (!wal->count)
		return;

	fw = wal_get_fw_for_rmw(uncore, wal);

	spin_lock_irqsave(&uncore->lock, flags);
	intel_uncore_forcewake_get__locked(uncore, fw);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		intel_uncore_rmw_fw(uncore, wa->reg, wa->mask, wa->val);
		if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
			wa_verify(wa,
				  intel_uncore_read_fw(uncore, wa->reg),
				  wal->name, "application");
	}

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock_irqrestore(&uncore->lock, flags);
}
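
/*
 * Each entry above is applied as a read-modify-write, i.e. roughly
 * (sketch of what intel_uncore_rmw_fw() does):
 *
 *	old = intel_uncore_read_fw(uncore, wa->reg);
 *	new = (old & ~wa->mask) | wa->val;
 *	intel_uncore_write_fw(uncore, wa->reg, new);
 *
 * with forcewake taken once for the whole list rather than per register.
 */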

void intel_gt_apply_workarounds(struct intel_gt *gt)
{
	wa_list_apply(gt->uncore, &gt->i915->gt_wa_list);
}

static bool wa_list_verify(struct intel_uncore *uncore,
			   const struct i915_wa_list *wal,
			   const char *from)
{
	struct i915_wa *wa;
	unsigned int i;
	bool ok = true;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		ok &= wa_verify(wa,
				intel_uncore_read(uncore, wa->reg),
				wal->name, from);

	return ok;
}

bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
{
	return wa_list_verify(gt->uncore, &gt->i915->gt_wa_list, from);
}

static inline bool is_nonpriv_flags_valid(u32 flags)
{
	/* Check only valid flag bits are set */
	if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID)
		return false;

	/* NB: Only 3 out of 4 enum values are valid for access field */
	if ((flags & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	    RING_FORCE_TO_NONPRIV_ACCESS_INVALID)
		return false;

	return true;
}

static void
whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
{
	struct i915_wa wa = {
		.reg = reg
	};

	if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
		return;

	if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))
		return;

	wa.reg.reg |= flags;
	_wa_add(wal, &wa);
}

static void
whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
{
	whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);
}
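
/*
 * Sketch: to expose a block of registers with restricted access, the
 * builders below combine an access mode with a range flag, e.g.
 *
 *	whitelist_reg_ext(w, PS_INVOCATION_COUNT,
 *			  RING_FORCE_TO_NONPRIV_ACCESS_RD |
 *			  RING_FORCE_TO_NONPRIV_RANGE_4);
 *
 * which whitelists PS_INVOCATION_COUNT and the next three registers
 * read-only, consuming a single RING_FORCE_TO_NONPRIV slot.
 */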

static void gen9_whitelist_build(struct i915_wa_list *w)
{
	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
	whitelist_reg(w, GEN9_CTX_PREEMPT_REG);

	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
	whitelist_reg(w, GEN8_CS_CHICKEN1);

	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
	whitelist_reg(w, GEN8_HDC_CHICKEN1);

	/* WaSendPushConstantsFromMMIO:skl,bxt */
	whitelist_reg(w, COMMON_SLICE_CHICKEN2);
}

static void skl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/* WaDisableLSQCROPERFforOCL:skl */
	whitelist_reg(w, GEN8_L3SQCREG4);
}

static void bxt_whitelist_build(struct intel_engine_cs *engine)
{
	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(&engine->whitelist);
}

static void kbl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/* WaDisableLSQCROPERFforOCL:kbl */
	whitelist_reg(w, GEN8_L3SQCREG4);
}

static void glk_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
	whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
}

static void cfl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/*
	 * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml
	 *
	 * This covers 4 registers which are next to one another :
	 *   - PS_INVOCATION_COUNT
	 *   - PS_INVOCATION_COUNT_UDW
	 *   - PS_DEPTH_COUNT
	 *   - PS_DEPTH_COUNT_UDW
	 */
	whitelist_reg_ext(w, PS_INVOCATION_COUNT,
			  RING_FORCE_TO_NONPRIV_ACCESS_RD |
			  RING_FORCE_TO_NONPRIV_RANGE_4);
}

static void cnl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	/* WaEnablePreemptionGranularityControlByUMD:cnl */
	whitelist_reg(w, GEN8_CS_CHICKEN1);
}

static void icl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	switch (engine->class) {
	case RENDER_CLASS:
		/* WaAllowUMDToModifyHalfSliceChicken7:icl */
		whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);

		/* WaAllowUMDToModifySamplerMode:icl */
		whitelist_reg(w, GEN10_SAMPLER_MODE);

		/* WaEnableStateCacheRedirectToCS:icl */
		whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);

		/*
		 * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl
		 *
		 * This covers 4 register which are next to one another :
		 *   - PS_INVOCATION_COUNT
		 *   - PS_INVOCATION_COUNT_UDW
		 *   - PS_DEPTH_COUNT
		 *   - PS_DEPTH_COUNT_UDW
		 */
		whitelist_reg_ext(w, PS_INVOCATION_COUNT,
				  RING_FORCE_TO_NONPRIV_ACCESS_RD |
				  RING_FORCE_TO_NONPRIV_RANGE_4);
		break;

	case VIDEO_DECODE_CLASS:
		/* hucStatusRegOffset */
		whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		/* hucUKernelHdrInfoRegOffset */
		whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		/* hucStatus2RegOffset */
		whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		break;

	default:
		break;
	}
}

static void tgl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	switch (engine->class) {
	case RENDER_CLASS:
		/*
		 * WaAllowPMDepthAndInvocationCountAccessFromUMD:tgl
		 *
		 * This covers 4 registers which are next to one another :
		 *   - PS_INVOCATION_COUNT
		 *   - PS_INVOCATION_COUNT_UDW
		 *   - PS_DEPTH_COUNT
		 *   - PS_DEPTH_COUNT_UDW
		 */
		whitelist_reg_ext(w, PS_INVOCATION_COUNT,
				  RING_FORCE_TO_NONPRIV_ACCESS_RD |
				  RING_FORCE_TO_NONPRIV_RANGE_4);
		break;
	default:
		break;
	}
}

void intel_engine_init_whitelist(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_wa_list *w = &engine->whitelist;

	wa_init_start(w, "whitelist", engine->name);

	if (IS_GEN(i915, 12))
		tgl_whitelist_build(engine);
	else if (IS_GEN(i915, 11))
		icl_whitelist_build(engine);
	else if (IS_CANNONLAKE(i915))
		cnl_whitelist_build(engine);
	else if (IS_COFFEELAKE(i915))
		cfl_whitelist_build(engine);
	else if (IS_GEMINILAKE(i915))
		glk_whitelist_build(engine);
	else if (IS_KABYLAKE(i915))
		kbl_whitelist_build(engine);
	else if (IS_BROXTON(i915))
		bxt_whitelist_build(engine);
	else if (IS_SKYLAKE(i915))
		skl_whitelist_build(engine);
	else if (INTEL_GEN(i915) <= 8)
		return;
	else
		MISSING_CASE(INTEL_GEN(i915));

	wa_init_finish(w);
}

void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
{
	const struct i915_wa_list *wal = &engine->whitelist;
	struct intel_uncore *uncore = engine->uncore;
	const u32 base = engine->mmio_base;
	struct i915_wa *wa;
	unsigned int i;

	if (!wal->count)
		return;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		intel_uncore_write(uncore,
				   RING_FORCE_TO_NONPRIV(base, i),
				   i915_mmio_reg_offset(wa->reg));

	/* And clear the rest just in case of garbage */
	for (; i < RING_MAX_NONPRIV_SLOTS; i++)
		intel_uncore_write(uncore,
				   RING_FORCE_TO_NONPRIV(base, i),
				   i915_mmio_reg_offset(RING_NOPID(base)));
}

static void
rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) {
		/* Wa_1606700617:tgl */
		wa_masked_en(wal,
			     GEN9_CS_DEBUG_MODE1,
			     FF_DOP_CLOCK_GATE_DISABLE);

		/* Wa_1607138336:tgl */
		wa_write_or(wal,
			    GEN9_CTX_PREEMPT_REG,
			    GEN12_DISABLE_POSH_BUSY_FF_DOP_CG);

		/* Wa_1607030317:tgl */
		/* Wa_1607186500:tgl */
		/* Wa_1607297627:tgl */
		wa_masked_en(wal,
			     GEN6_RC_SLEEP_PSMI_CONTROL,
			     GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
			     GEN8_RC_SEMA_IDLE_MSG_DISABLE);

		/*
		 * Wa_1606679103:tgl
		 * (see also Wa_1606682166:icl)
		 */
		wa_write_or(wal,
			    GEN7_SARCHKMD,
			    GEN7_DISABLE_SAMPLER_PREFETCH);
	}

	if (IS_GEN(i915, 11)) {
		/* This is not a Wa. Enable for better image quality */
		wa_masked_en(wal,
			     _3D_CHICKEN3,
			     _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);

		/* WaPipelineFlushCoherentLines:icl */
		wa_write_or(wal,
			    GEN8_L3SQCREG4,
			    GEN8_LQSC_FLUSH_COHERENT_LINES);

		/*
		 * Wa_1405543622:icl
		 * Formerly known as WaGAPZPriorityScheme
		 */
		wa_write_or(wal,
			    GEN8_GARBCNTL,
			    GEN11_ARBITRATION_PRIO_ORDER_MASK);

		/*
		 * Wa_1604223664:icl
		 * Formerly known as WaL3BankAddressHashing
		 */
		wa_write_masked_or(wal,
				   GEN8_GARBCNTL,
				   GEN11_HASH_CTRL_EXCL_MASK,
				   GEN11_HASH_CTRL_EXCL_BIT0);
		wa_write_masked_or(wal,
				   GEN11_GLBLINVL,
				   GEN11_BANK_HASH_ADDR_EXCL_MASK,
				   GEN11_BANK_HASH_ADDR_EXCL_BIT0);

		/*
		 * Wa_1405733216:icl
		 * Formerly known as WaDisableCleanEvicts
		 */
		wa_write_or(wal,
			    GEN8_L3SQCREG4,
			    GEN11_LQSC_CLEAN_EVICT_DISABLE);

		/* WaForwardProgressSoftReset:icl */
		wa_write_or(wal,
			    GEN10_SCRATCH_LNCF2,
			    PMFLUSHDONE_LNICRSDROP |
			    PMFLUSH_GAPL3UNBLOCK |
			    PMFLUSHDONE_LNEBLK);

		/* Wa_1406609255:icl (pre-prod) */
		if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
			wa_write_or(wal,
				    GEN7_SARCHKMD,
				    GEN7_DISABLE_DEMAND_PREFETCH);

		/* Wa_1606682166:icl */
		wa_write_or(wal,
			    GEN7_SARCHKMD,
			    GEN7_DISABLE_SAMPLER_PREFETCH);

		/* Wa_1409178092:icl */
		wa_write_masked_or(wal,
				   GEN11_SCRATCH2,
				   GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
				   0);
	}

	if (IS_GEN_RANGE(i915, 9, 11)) {
		/* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl */
		wa_masked_en(wal,
			     GEN7_FF_SLICE_CS_CHICKEN1,
			     GEN9_FFSC_PERCTX_PREEMPT_CTRL);
	}

	if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
		/* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
		wa_write_or(wal,
			    GEN8_GARBCNTL,
			    GEN9_GAPS_TSV_CREDIT_DISABLE);
	}

	if (IS_BROXTON(i915)) {
		/* WaDisablePooledEuLoadBalancingFix:bxt */
		wa_masked_en(wal,
			     FF_SLICE_CS_CHICKEN2,
			     GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
	}

	if (IS_GEN(i915, 9)) {
		/* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
		wa_masked_en(wal,
			     GEN9_CSFE_CHICKEN1_RCS,
			     GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);

		/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
		wa_write_or(wal,
			    BDW_SCRATCH1,
			    GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

		/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
		if (IS_GEN9_LP(i915))
			wa_write_masked_or(wal,
					   GEN8_L3SQCREG1,
					   L3_PRIO_CREDITS_MASK,
					   L3_GENERAL_PRIO_CREDITS(62) |
					   L3_HIGH_PRIO_CREDITS(2));

		/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
		wa_write_or(wal,
			    GEN8_L3SQCREG4,
			    GEN8_LQSC_FLUSH_COHERENT_LINES);
	}
}

static void
xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	/* WaKBLVECSSemaphoreWaitPoll:kbl */
	if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
		wa_write(wal,
			 RING_SEMA_WAIT_POLL(engine->mmio_base),
			 1);
	}
}

static void
engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8))
		return;

	if (engine->class == RENDER_CLASS)
		rcs_engine_wa_init(engine, wal);
	else
		xcs_engine_wa_init(engine, wal);
}

void intel_engine_init_workarounds(struct intel_engine_cs *engine)
{
	struct i915_wa_list *wal = &engine->wa_list;

	if (INTEL_GEN(engine->i915) < 8)
		return;

	wa_init_start(wal, "engine", engine->name);
	engine_init_workarounds(engine, wal);
	wa_init_finish(wal);
}

void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
{
	wa_list_apply(engine->uncore, &engine->wa_list);
}

static struct i915_vma *
create_scratch(struct i915_address_space *vm, int count)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned int size;
	int err;

	size = round_up(count * sizeof(u32), PAGE_SIZE);
	obj = i915_gem_object_create_internal(vm->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0,
			   i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
	if (err)
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static bool mcr_range(struct drm_i915_private *i915, u32 offset)
{
	/*
	 * Registers in this range are affected by the MCR selector
	 * which only controls CPU initiated MMIO. Routing does not
	 * work for CS access so we cannot verify them on this path.
	 */
	if (INTEL_GEN(i915) >= 8 && (offset >= 0xb000 && offset <= 0xb4ff))
		return true;

	return false;
}

static int
wa_list_srm(struct i915_request *rq,
	    const struct i915_wa_list *wal,
	    struct i915_vma *vma)
{
	struct drm_i915_private *i915 = rq->i915;
	unsigned int i, count = 0;
	const struct i915_wa *wa;
	u32 srm, *cs;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (INTEL_GEN(i915) >= 8)
		srm++;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		if (!mcr_range(i915, i915_mmio_reg_offset(wa->reg)))
			count++;
	}

	cs = intel_ring_begin(rq, 4 * count);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		u32 offset = i915_mmio_reg_offset(wa->reg);

		if (mcr_range(i915, offset))
			continue;

		*cs++ = srm;
		*cs++ = offset;
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	return 0;
}
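
/*
 * The stream built above is one MI_STORE_REGISTER_MEM per workaround
 * register outside the MCR range, roughly (sketch):
 *
 *	SRM <reg offset> -> scratch vma + 4 * i
 *
 * so that engine_wa_list_verify() below can compare what the command
 * streamer reads back against the expected wa->val.
 */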

static int engine_wa_list_verify(struct intel_context *ce,
				 const struct i915_wa_list * const wal,
				 const char *from)
{
	const struct i915_wa *wa;
	struct i915_request *rq;
	struct i915_vma *vma;
	unsigned int i;
	u32 *results;
	int err;

	if (!wal->count)
		return 0;

	vma = create_scratch(&ce->engine->gt->ggtt->vm, wal->count);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_engine_pm_get(ce->engine);
	rq = intel_context_create_request(ce);
	intel_engine_pm_put(ce->engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_vma;
	}

	err = wa_list_srm(rq, wal, vma);
	if (err)
		goto err_vma;

	i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
		err = -ETIME;
		goto err_rq;
	}

	results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(results)) {
		err = PTR_ERR(results);
		goto err_rq;
	}

	err = 0;
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		if (mcr_range(rq->i915, i915_mmio_reg_offset(wa->reg)))
			continue;

		if (!wa_verify(wa, results[i], wal->name, from))
			err = -ENXIO;
	}

	i915_gem_object_unpin_map(vma->obj);

err_rq:
	i915_request_put(rq);
err_vma:
	i915_vma_unpin(vma);
	i915_vma_put(vma);
	return err;
}

int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
				    const char *from)
{
	return engine_wa_list_verify(engine->kernel_context,
				     &engine->wa_list,
				     from);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_workarounds.c"
#endif