// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2018 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_ring.h"
#include "intel_workarounds.h"

/**
 * DOC: Hardware workarounds
 *
 * This file is intended as a central place to implement most [1]_ of the
 * required workarounds for hardware to work as originally intended. They fall
 * into five basic categories depending on how/when they are applied:
 *
 * - Workarounds that touch registers that are saved/restored to/from the HW
 *   context image. The list is emitted (via Load Register Immediate commands)
 *   every time a new context is created.
 * - GT workarounds. The list of these WAs is applied whenever these registers
 *   revert to default values (on GPU reset, suspend/resume [2]_, etc.).
 * - Display workarounds. The list is applied during display clock-gating
 *   initialization.
 * - Workarounds that whitelist a privileged register, so that UMDs can manage
 *   them directly. This is just a special case of an MMIO workaround (as we
 *   write the list of these to-be-whitelisted registers to some special HW
 *   registers).
 * - Workaround batchbuffers, that get executed automatically by the hardware
 *   on every HW context restore.
 *
 * .. [1] Please notice that there are other WAs that, due to their nature,
 *    cannot be applied from a central place. Those are peppered around the rest
 *    of the code, as needed.
 *
 * .. [2] Technically, some registers are power-context saved & restored, so
 *    they survive a suspend/resume. In practice, writing them again is not too
 *    costly and simplifies things. We can revisit this in the future.
 *
 * Layout
 * ~~~~~~
 *
 * Keep things in this file ordered by WA type, as per the above (context, GT,
 * display, register whitelist, batchbuffer). Then, inside each type, keep the
 * following order:
 *
 * - Infrastructure functions and macros
 * - WAs per platform in standard gen/chrono order
 * - Public functions to init or apply the given workaround type.
 */
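
/*
 * As a quick illustration (not an exhaustive pattern), a typical context
 * workaround below reduces to a single helper call tagged with its bspec
 * name and the platforms it applies to:
 *
 *	wa_masked_en(wal, CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
 *
 * which corresponds to Wa4x4STCOptimizationDisable:bdw,chv further down in
 * this file.
 */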

static void wa_init_start(struct i915_wa_list *wal, const char *name, const char *engine_name)
{
	wal->name = name;
	wal->engine_name = engine_name;
}

#define WA_LIST_CHUNK (1 << 4)

static void wa_init_finish(struct i915_wa_list *wal)
{
	/* Trim unused entries. */
	if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
		struct i915_wa *list = kmemdup(wal->list,
					       wal->count * sizeof(*list),
					       GFP_KERNEL);

		if (list) {
			kfree(wal->list);
			wal->list = list;
		}
	}

	if (!wal->count)
		return;

	DRM_DEBUG_DRIVER("Initialized %u %s workarounds on %s\n",
			 wal->wa_count, wal->name, wal->engine_name);
}

static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
{
	unsigned int addr = i915_mmio_reg_offset(wa->reg);
	unsigned int start = 0, end = wal->count;
	const unsigned int grow = WA_LIST_CHUNK;
	struct i915_wa *wa_;

	GEM_BUG_ON(!is_power_of_2(grow));

	if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
		struct i915_wa *list;

		list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
				     GFP_KERNEL);
		if (!list) {
			DRM_ERROR("No space for workaround init!\n");
			return;
		}

		if (wal->list) {
			memcpy(list, wal->list, sizeof(*wa) * wal->count);
			kfree(wal->list);
		}

		wal->list = list;
	}

	while (start < end) {
		unsigned int mid = start + (end - start) / 2;

		if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) {
			start = mid + 1;
		} else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) {
			end = mid;
		} else {
			wa_ = &wal->list[mid];

			if ((wa->clr | wa_->clr) && !(wa->clr & ~wa_->clr)) {
				DRM_ERROR("Discarding overwritten w/a for reg %04x (clear: %08x, set: %08x)\n",
					  i915_mmio_reg_offset(wa_->reg),
					  wa_->clr, wa_->set);

				wa_->set &= ~wa->clr;
			}

			wal->wa_count++;
			wa_->set |= wa->set;
			wa_->clr |= wa->clr;
			wa_->read |= wa->read;
			return;
		}
	}

	wal->wa_count++;
	wa_ = &wal->list[wal->count++];
	*wa_ = *wa;

	while (wa_-- > wal->list) {
		GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) ==
			   i915_mmio_reg_offset(wa_[1].reg));
		if (i915_mmio_reg_offset(wa_[1].reg) >
		    i915_mmio_reg_offset(wa_[0].reg))
			break;

		swap(wa_[1], wa_[0]);
	}
}

static void wa_add(struct i915_wa_list *wal, i915_reg_t reg,
		   u32 clear, u32 set, u32 read_mask)
{
	struct i915_wa wa = {
		.reg  = reg,
		.clr  = clear,
		.set  = set,
		.read = read_mask,
	};

	_wa_add(wal, &wa);
}

static void
wa_write_clr_set(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
{
	wa_add(wal, reg, clear, set, clear);
}

static void
wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
	wa_write_clr_set(wal, reg, ~0, set);
}

static void
wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
	wa_write_clr_set(wal, reg, set, set);
}

static void
wa_write_clr(struct i915_wa_list *wal, i915_reg_t reg, u32 clr)
{
	wa_write_clr_set(wal, reg, clr, 0);
}
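
/*
 * A minimal sketch of how the helpers above map onto the clr/set pair that
 * the apply path turns into val = (old & ~clr) | set (see wa_list_apply()):
 *
 *	wa_write(wal, reg, v);         clr = ~0,   set = v     full overwrite
 *	wa_write_or(wal, reg, bits);   clr = bits, set = bits  rmw, OR in bits
 *	wa_write_clr(wal, reg, bits);  clr = bits, set = 0     rmw, clear bits
 *
 * In each case the clr mask doubles as the read-back verification mask via
 * wa_write_clr_set().
 */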

/*
 * WA operations on "masked registers". A masked register has the upper 16 bits
 * documented as "masked" in the bspec. Its purpose is to allow writing to just
 * a portion of the register without a read-modify-write: you simply write in
 * the upper 16 bits the mask of the bits you are going to modify.
 *
 * The wa_masked_* family of functions already does the necessary operations to
 * calculate the mask based on the parameters passed, so the user only has to
 * provide the lower 16 bits of that register.
 */

static void
wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
	wa_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val);
}

static void
wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
	wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val);
}

static void
wa_masked_field_set(struct i915_wa_list *wal, i915_reg_t reg,
		    u32 mask, u32 val)
{
	wa_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask);
}
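
/*
 * For illustration (assuming the standard _MASKED_BIT_ENABLE() encoding of
 * value in the low half and mask in the high half), enabling bit 2 of a
 * masked register via
 *
 *	wa_masked_en(wal, reg, BIT(2));
 *
 * queues a write of (BIT(2) << 16) | BIT(2): the upper half tells the HW
 * which bits to touch, the lower half carries the new value, so no
 * read-modify-write cycle is needed.
 */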

static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
}

static void gen7_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
}

static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisableAsyncFlipPerfMode:bdw,chv */
	wa_masked_en(wal, MI_MODE, ASYNC_FLIP_PERF_DISABLE);

	/* WaDisablePartialInstShootdown:bdw,chv */
	wa_masked_en(wal, GEN8_ROW_CHICKEN,
		     PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:bdw,chv */
	/* WaHdcDisableFetchWhenMasked:bdw,chv */
	wa_masked_en(wal, HDC_CHICKEN0,
		     HDC_DONOT_FETCH_MEM_WHEN_MASKED |
		     HDC_FORCE_NON_COHERENT);

	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
	 *  polygons in the same 8x4 pixel/sample area to be processed without
	 *  stalling waiting for the earlier ones to write to Hierarchical Z
	 *  buffer."
	 *
	 * This optimization is off by default for BDW and CHV; turn it on.
	 */
	wa_masked_dis(wal, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw,chv */
	wa_masked_en(wal, CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	wa_masked_field_set(wal, GEN7_GT_MODE,
			    GEN6_WIZ_HASHING_MASK,
			    GEN6_WIZ_HASHING_16x4);
}

static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	gen8_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
	wa_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* WaDisableDopClockGating:bdw
	 *
	 * Also see the related UCGTCL1 write in bdw_init_clock_gating()
	 * to disable EUTC clock gating.
	 */
	wa_masked_en(wal, GEN7_ROW_CHICKEN2,
		     DOP_CLOCK_GATING_DISABLE);

	wa_masked_en(wal, HALF_SLICE_CHICKEN3,
		     GEN8_SAMPLER_POWER_BYPASS_DIS);

	wa_masked_en(wal, HDC_CHICKEN0,
		     /* WaForceContextSaveRestoreNonCoherent:bdw */
		     HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
		     /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
		     (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
}

static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen8_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:chv */
	wa_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* Improve HiZ throughput on CHV. */
	wa_masked_en(wal, HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
}

static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	if (HAS_LLC(i915)) {
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
		 *
		 * Must match Display Engine. See
		 * WaCompressedResourceDisplayNewHashMode.
		 */
		wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
			     GEN9_PBE_COMPRESSED_HASH_SELECTION);
		wa_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
			     GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
	}

	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
	wa_masked_en(wal, GEN8_ROW_CHICKEN,
		     FLOW_CONTROL_ENABLE |
		     PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
	wa_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
		     GEN9_ENABLE_YV12_BUGFIX |
		     GEN9_ENABLE_GPGPU_PREEMPTION);

	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
	wa_masked_en(wal, CACHE_MODE_1,
		     GEN8_4x4_STC_OPTIMIZATION_DISABLE |
		     GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);

	/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
	wa_masked_dis(wal, GEN9_HALF_SLICE_CHICKEN5,
		      GEN9_CCS_TLB_PREFETCH_ENABLE);

	/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
	wa_masked_en(wal, HDC_CHICKEN0,
		     HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
		     HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);

	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
	 * both tied to WaForceContextSaveRestoreNonCoherent
	 * in some hsds for skl. We keep the tie for all gen9. The
	 * documentation is a bit hazy and so we want to get common behaviour,
	 * even though there is no clear evidence we would need both on kbl/bxt.
	 * This area has been a source of system hangs so we play it safe
	 * and mimic the skl regardless of what bspec says.
	 *
	 * Use Force Non-Coherent whenever executing a 3D context. This
	 * is a workaround for a possible hang in the unlikely event
	 * a TLB invalidation occurs during a PSD flush.
	 */

	/* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
	wa_masked_en(wal, HDC_CHICKEN0,
		     HDC_FORCE_NON_COHERENT);

	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
	if (IS_SKYLAKE(i915) ||
	    IS_KABYLAKE(i915) ||
	    IS_COFFEELAKE(i915) ||
	    IS_COMETLAKE(i915))
		wa_masked_en(wal, HALF_SLICE_CHICKEN3,
			     GEN8_SAMPLER_POWER_BYPASS_DIS);

	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
	wa_masked_en(wal, HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

	/*
	 * Supporting preemption with fine-granularity requires changes in the
	 * batch buffer programming. Since we can't break old userspace, we
	 * need to set our default preemption level to a safe value. Userspace
	 * is still able to use more fine-grained preemption levels, since in
	 * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
	 * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
	 * not real HW workarounds, but merely a way to start using preemption
	 * while maintaining the old contract with userspace.
	 */

	/* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	wa_masked_dis(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

	/* WaDisableGPGPUMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

	/* WaClearHIZ_WM_CHICKEN3:bxt,glk */
	if (IS_GEN9_LP(i915))
		wa_masked_en(wal, GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
}

static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
				struct i915_wa_list *wal)
{
	struct intel_gt *gt = engine->gt;
	u8 vals[3] = { 0, 0, 0 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		u8 ss;

		/*
		 * Only consider slices where one, and only one, subslice has 7
		 * EUs
		 */
		if (!is_power_of_2(gt->info.sseu.subslice_7eu[i]))
			continue;

		/*
		 * subslice_7eu[i] != 0 (because of the check above) and
		 * ss_max == 4 (maximum number of subslices possible per slice)
		 *
		 * ->    0 <= ss <= 3;
		 */
		ss = ffs(gt->info.sseu.subslice_7eu[i]) - 1;
		vals[i] = 3 - ss;
	}

	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
		return;

	/* Tune IZ hashing. See intel_device_info_runtime_init() */
	wa_masked_field_set(wal, GEN7_GT_MODE,
			    GEN9_IZ_HASHING_MASK(2) |
			    GEN9_IZ_HASHING_MASK(1) |
			    GEN9_IZ_HASHING_MASK(0),
			    GEN9_IZ_HASHING(2, vals[2]) |
			    GEN9_IZ_HASHING(1, vals[1]) |
			    GEN9_IZ_HASHING(0, vals[0]));
}
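
/*
 * Worked example for the loop above: if slice 0 reports
 * subslice_7eu[0] == BIT(2) (exactly one subslice with 7 EUs), then
 * ss = ffs(BIT(2)) - 1 = 2 and vals[0] = 3 - ss = 1, which is the value
 * programmed into the GEN9_IZ_HASHING field for that slice.
 */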

static void skl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);
	skl_tune_iz_hashing(engine, wal);
}

static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:bxt */
	wa_masked_en(wal, GEN8_ROW_CHICKEN,
		     STALL_DOP_GATING_DISABLE);

	/* WaToEnableHwFixForPushConstHWBug:bxt */
	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}

static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:kbl */
	if (IS_KBL_GT_STEP(i915, STEP_C0, STEP_FOREVER))
		wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
			     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableSbeCacheDispatchPortSharing:kbl */
	wa_masked_en(wal, GEN7_HALF_SLICE_CHICKEN1,
		     GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}

static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:glk */
	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}

static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:cfl */
	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableSbeCacheDispatchPortSharing:cfl */
	wa_masked_en(wal, GEN7_HALF_SLICE_CHICKEN1,
		     GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}

static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	/* Wa_1406697149 (WaDisableBankHangMode:icl) */
	wa_write(wal,
		 GEN8_L3CNTLREG,
		 intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) |
		 GEN8_ERRDETBCTRL);

	/* WaForceEnableNonCoherent:icl
	 * This is not the same workaround as in early Gen9 platforms, where
	 * lacking this could cause system hangs, but coherency performance
	 * overhead is high and only a few compute workloads really need it
	 * (the register is whitelisted in hardware now, so UMDs can opt in
	 * for coherency if they have a good reason).
	 */
	wa_masked_en(wal, ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);

	/* WaEnableFloatBlendOptimization:icl */
	wa_write_clr_set(wal,
			 GEN10_CACHE_MODE_SS,
			 0, /* write-only, so skip validation */
			 _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE));

	/* WaDisableGPGPUMidThreadPreemption:icl */
	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);

	/* Allow headerless messages for preemptible GPGPU contexts. */
	wa_masked_en(wal, GEN10_SAMPLER_MODE,
		     GEN11_SAMPLER_ENABLE_HEADLESS_MSG);

	/* Wa_1604278689:icl,ehl */
	wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID);
	wa_write_clr_set(wal, IVB_FBC_RT_BASE_UPPER,
			 0, /* write-only register; skip validation */
			 0xFFFFFFFF);

	/* Wa_1406306137:icl,ehl */
	wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);
}

/*
 * These settings aren't actually workarounds, but general tuning settings that
 * need to be programmed on several platforms.
 */
static void gen12_ctx_gt_tuning_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	/*
	 * Although some platforms refer to it as Wa_1604555607, we need to
	 * program it even on those that don't explicitly list that
	 * workaround.
	 *
	 * Note that the programming of this register is further modified
	 * according to the FF_MODE2 guidance given by Wa_1608008084:gen12.
	 * Wa_1608008084 tells us the FF_MODE2 register will return the wrong
	 * value when read. The default value for this register is zero for all
	 * fields and there are no bit masks. So instead of doing a RMW we
	 * should just write TDS timer value. For the same reason read
	 * verification is ignored.
	 */
	wa_add(wal,
	       FF_MODE2,
	       FF_MODE2_TDS_TIMER_MASK,
	       FF_MODE2_TDS_TIMER_128,
	       0);
}

static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
				       struct i915_wa_list *wal)
{
	gen12_ctx_gt_tuning_init(engine, wal);

	/*
	 * Wa_1409142259:tgl,dg1,adl-p
	 * Wa_1409347922:tgl,dg1,adl-p
	 * Wa_1409252684:tgl,dg1,adl-p
	 * Wa_1409217633:tgl,dg1,adl-p
	 * Wa_1409207793:tgl,dg1,adl-p
	 * Wa_1409178076:tgl,dg1,adl-p
	 * Wa_1408979724:tgl,dg1,adl-p
	 * Wa_14010443199:tgl,rkl,dg1,adl-p
	 * Wa_14010698770:tgl,rkl,dg1,adl-s,adl-p
	 * Wa_1409342910:tgl,rkl,dg1,adl-s,adl-p
	 */
	wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
		     GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);

	/* WaDisableGPGPUMidThreadPreemption:gen12 */
	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);

	/*
	 * Wa_16011163337
	 *
	 * Like in gen12_ctx_gt_tuning_init(), read verification is ignored due
	 * to Wa_1608008084.
	 */
	wa_add(wal,
	       FF_MODE2,
	       FF_MODE2_GS_TIMER_MASK,
	       FF_MODE2_GS_TIMER_224,
	       0);

	/*
	 * Wa_14012131227:dg1
	 * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p
	 */
	wa_masked_en(wal, GEN7_COMMON_SLICE_CHICKEN1,
		     GEN9_RHWO_OPTIMIZATION_DISABLE);
}

static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen12_ctx_workarounds_init(engine, wal);

	/* Wa_1409044764 */
	wa_masked_dis(wal, GEN11_COMMON_SLICE_CHICKEN3,
		      DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN);

	/* Wa_22010493298 */
	wa_masked_en(wal, HIZ_CHICKEN,
		     DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);
}

static void
__intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
			   struct i915_wa_list *wal,
			   const char *name)
{
	struct drm_i915_private *i915 = engine->i915;

	if (engine->class != RENDER_CLASS)
		return;

	wa_init_start(wal, name, engine->name);

	if (IS_DG1(i915))
		dg1_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) == 12)
		gen12_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) == 11)
		icl_ctx_workarounds_init(engine, wal);
	else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
		cfl_ctx_workarounds_init(engine, wal);
	else if (IS_GEMINILAKE(i915))
		glk_ctx_workarounds_init(engine, wal);
	else if (IS_KABYLAKE(i915))
		kbl_ctx_workarounds_init(engine, wal);
	else if (IS_BROXTON(i915))
		bxt_ctx_workarounds_init(engine, wal);
	else if (IS_SKYLAKE(i915))
		skl_ctx_workarounds_init(engine, wal);
	else if (IS_CHERRYVIEW(i915))
		chv_ctx_workarounds_init(engine, wal);
	else if (IS_BROADWELL(i915))
		bdw_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) == 7)
		gen7_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) == 6)
		gen6_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) < 8)
		;
	else
		MISSING_CASE(GRAPHICS_VER(i915));

	wa_init_finish(wal);
}

void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
{
	__intel_engine_init_ctx_wa(engine, &engine->ctx_wa_list, "context");
}

int intel_engine_emit_ctx_wa(struct i915_request *rq)
{
	struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
	struct i915_wa *wa;
	unsigned int i;
	u32 *cs;
	int ret;

	if (wal->count == 0)
		return 0;

	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
	if (ret)
		return ret;

	cs = intel_ring_begin(rq, (wal->count * 2 + 2));
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(wal->count);
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		*cs++ = i915_mmio_reg_offset(wa->reg);
		*cs++ = wa->set;
	}
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}
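
/*
 * For reference, a sketch of the command stream emitted above for a
 * two-entry list (offsets and values illustrative only):
 *
 *	MI_LOAD_REGISTER_IMM(2)
 *	<reg0 offset> <reg0 set value>
 *	<reg1 offset> <reg1 set value>
 *	MI_NOOP
 *
 * i.e. wal->count * 2 dwords of payload plus the LRI header and a NOOP,
 * matching the intel_ring_begin(rq, wal->count * 2 + 2) reservation.
 */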

static void
gen4_gt_workarounds_init(struct drm_i915_private *i915,
			 struct i915_wa_list *wal)
{
	/* WaDisable_RenderCache_OperationalFlush:gen4,ilk */
	wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
}

static void
g4x_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen4_gt_workarounds_init(i915, wal);

	/* WaDisableRenderCachePipelinedFlush:g4x,ilk */
	wa_masked_en(wal, CACHE_MODE_0, CM0_PIPELINED_RENDER_FLUSH_DISABLE);
}

static void
ilk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	g4x_gt_workarounds_init(i915, wal);

	wa_masked_en(wal, _3D_CHICKEN2, _3D_CHICKEN2_WM_READ_PIPELINED);
}

static void
snb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
}

static void
ivb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	wa_masked_dis(wal,
		      GEN7_COMMON_SLICE_CHICKEN1,
		      GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	wa_write(wal, GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
	wa_write(wal, GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);

	/* WaForceL3Serialization:ivb */
	wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
}

static void
vlv_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	/* WaForceL3Serialization:vlv */
	wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	wa_write(wal, GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
}

static void
hsw_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	/* L3 caching of data atomics doesn't work -- disable it. */
	wa_write(wal, HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);

	wa_add(wal,
	       HSW_ROW_CHICKEN3, 0,
	       _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
	       0 /* XXX does this reg exist? */);

	/* WaVSRefCountFullforceMissDisable:hsw */
	wa_write_clr(wal, GEN7_FF_THREAD_MODE, GEN7_FF_VS_REF_CNT_FFME);
}

static void
gen9_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	/* WaDisableKillLogic:bxt,skl,kbl */
	if (!IS_COFFEELAKE(i915) && !IS_COMETLAKE(i915))
		wa_write_or(wal,
			    GAM_ECOCHK,
			    ECOCHK_DIS_TLB);

	if (HAS_LLC(i915)) {
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
		 *
		 * Must match Display Engine. See
		 * WaCompressedResourceDisplayNewHashMode.
		 */
		wa_write_or(wal,
			    MMCD_MISC_CTRL,
			    MMCD_PCLA | MMCD_HOTSPOT_EN);
	}

	/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
	wa_write_or(wal,
		    GAM_ECOCHK,
		    BDW_DISABLE_HDC_INVALIDATION);
}

static void
skl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);

	/* WaDisableGafsUnitClkGating:skl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:skl */
	if (IS_SKL_GT_STEP(i915, STEP_A0, STEP_H0))
		wa_write_or(wal,
			    GEN9_GAMT_ECO_REG_RW_IA,
			    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
kbl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);

	/* WaDisableDynamicCreditSharing:kbl */
	if (IS_KBL_GT_STEP(i915, 0, STEP_C0))
		wa_write_or(wal,
			    GAMT_CHKN_BIT_REG,
			    GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);

	/* WaDisableGafsUnitClkGating:kbl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:kbl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
glk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);
}

static void
cfl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);

	/* WaDisableGafsUnitClkGating:cfl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:cfl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
icl_wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	const struct sseu_dev_info *sseu = &i915->gt.info.sseu;
	unsigned int slice, subslice;
	u32 mcr, mcr_mask;

	GEM_BUG_ON(GRAPHICS_VER(i915) < 11);
	GEM_BUG_ON(hweight8(sseu->slice_mask) > 1);
	slice = 0;

	/*
	 * Although a platform may have subslices, we need to always steer
	 * reads to the lowest instance that isn't fused off.  When Render
	 * Power Gating is enabled, grabbing forcewake will only power up a
	 * single subslice (the "minconfig") if there isn't a real workload
	 * that needs to be run; this means that if we steer register reads to
	 * one of the higher subslices, we run the risk of reading back 0's or
	 * random garbage.
	 */
	subslice = __ffs(intel_sseu_get_subslices(sseu, slice));

	/*
	 * If the subslice we picked above also steers us to a valid L3 bank,
	 * then we can just rely on the default steering and won't need to
	 * worry about explicitly re-steering L3BANK reads later.
	 */
	if (i915->gt.info.l3bank_mask & BIT(subslice))
		i915->gt.steering_table[L3BANK] = NULL;

	mcr = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
	mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;

	drm_dbg(&i915->drm, "MCR slice/subslice = %x\n", mcr);

	wa_write_clr_set(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
}
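
/*
 * Worked example (hypothetical fusing): if subslices 0 and 1 of slice 0
 * are fused off, intel_sseu_get_subslices() could return 0b1100;
 * __ffs(0b1100) == 2 then picks subslice 2, and GEN8_MCR_SELECTOR steers
 * unicast reads to slice 0 / subslice 2, the lowest instance actually
 * present.
 */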

static void
icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	icl_wa_init_mcr(i915, wal);

	/* WaModifyGamTlbPartitioning:icl */
	wa_write_clr_set(wal,
			 GEN11_GACB_PERF_CTRL,
			 GEN11_HASH_CTRL_MASK,
			 GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);

	/* Wa_1405766107:icl
	 * Formerly known as WaCL2SFHalfMaxAlloc
	 */
	wa_write_or(wal,
		    GEN11_LSN_UNSLCVC,
		    GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
		    GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);

	/* Wa_220166154:icl
	 * Formerly known as WaDisCtxReload
	 */
	wa_write_or(wal,
		    GEN8_GAMW_ECO_DEV_RW_IA,
		    GAMW_ECO_DEV_CTX_RELOAD_DISABLE);

	/* Wa_1406463099:icl
	 * Formerly known as WaGamTlbPendError
	 */
	wa_write_or(wal,
		    GAMT_CHKN_BIT_REG,
		    GAMT_CHKN_DISABLE_L3_COH_PIPE);

	/* Wa_1607087056:icl,ehl,jsl */
	if (IS_ICELAKE(i915) ||
	    IS_JSL_EHL_GT_STEP(i915, STEP_A0, STEP_B0))
		wa_write_or(wal,
			    SLICE_UNIT_LEVEL_CLKGATE,
			    L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);

	/*
	 * This is not a documented workaround, but rather an optimization
	 * to reduce sampler power.
	 */
	wa_write_clr(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);
}

/*
 * Though there are per-engine instances of these registers,
 * they retain their value through engine resets and should
 * only be provided on the GT workaround list rather than
 * the engine-specific workaround list.
 */
static void
wa_14011060649(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	struct intel_engine_cs *engine;
	struct intel_gt *gt = &i915->gt;
	int id;

	for_each_engine(engine, gt, id) {
		if (engine->class != VIDEO_DECODE_CLASS ||
		    (engine->instance % 2))
			continue;

		wa_write_or(wal, VDBOX_CGCTL3F10(engine->mmio_base),
			    IECPUNIT_CLKGATE_DIS);
	}
}

static void
gen12_gt_workarounds_init(struct drm_i915_private *i915,
			  struct i915_wa_list *wal)
{
	icl_wa_init_mcr(i915, wal);

	/* Wa_14011060649:tgl,rkl,dg1,adl-s,adl-p */
	wa_14011060649(i915, wal);

	/* Wa_14011059788:tgl,rkl,adl-s,dg1,adl-p */
	wa_write_or(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);
}

static void
tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen12_gt_workarounds_init(i915, wal);

	/* Wa_1409420604:tgl */
	if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0))
		wa_write_or(wal,
			    SUBSLICE_UNIT_LEVEL_CLKGATE2,
			    CPSSUNIT_CLKGATE_DIS);

	/* Wa_1607087056:tgl, also known as BUG:1409180338 */
	if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0))
		wa_write_or(wal,
			    SLICE_UNIT_LEVEL_CLKGATE,
			    L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);

	/* Wa_1408615072:tgl[a0] */
	if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0))
		wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
			    VSUNIT_CLKGATE_DIS_TGL);
}

static void
dg1_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen12_gt_workarounds_init(i915, wal);

	/* Wa_1607087056:dg1 */
	if (IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0))
		wa_write_or(wal,
			    SLICE_UNIT_LEVEL_CLKGATE,
			    L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);

	/* Wa_1409420604:dg1 */
	if (IS_DG1(i915))
		wa_write_or(wal,
			    SUBSLICE_UNIT_LEVEL_CLKGATE2,
			    CPSSUNIT_CLKGATE_DIS);

	/* Wa_1408615072:dg1 */
	/* Empirical testing shows this register is unaffected by engine reset. */
	if (IS_DG1(i915))
		wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
			    VSUNIT_CLKGATE_DIS_TGL);
}

static void
gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	if (IS_DG1(i915))
		dg1_gt_workarounds_init(i915, wal);
	else if (IS_TIGERLAKE(i915))
		tgl_gt_workarounds_init(i915, wal);
	else if (GRAPHICS_VER(i915) == 12)
		gen12_gt_workarounds_init(i915, wal);
	else if (GRAPHICS_VER(i915) == 11)
		icl_gt_workarounds_init(i915, wal);
	else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
		cfl_gt_workarounds_init(i915, wal);
	else if (IS_GEMINILAKE(i915))
		glk_gt_workarounds_init(i915, wal);
	else if (IS_KABYLAKE(i915))
		kbl_gt_workarounds_init(i915, wal);
	else if (IS_BROXTON(i915))
		gen9_gt_workarounds_init(i915, wal);
	else if (IS_SKYLAKE(i915))
		skl_gt_workarounds_init(i915, wal);
	else if (IS_HASWELL(i915))
		hsw_gt_workarounds_init(i915, wal);
	else if (IS_VALLEYVIEW(i915))
		vlv_gt_workarounds_init(i915, wal);
	else if (IS_IVYBRIDGE(i915))
		ivb_gt_workarounds_init(i915, wal);
	else if (GRAPHICS_VER(i915) == 6)
		snb_gt_workarounds_init(i915, wal);
	else if (GRAPHICS_VER(i915) == 5)
		ilk_gt_workarounds_init(i915, wal);
	else if (IS_G4X(i915))
		g4x_gt_workarounds_init(i915, wal);
	else if (GRAPHICS_VER(i915) == 4)
		gen4_gt_workarounds_init(i915, wal);
	else if (GRAPHICS_VER(i915) <= 8)
		;
	else
		MISSING_CASE(GRAPHICS_VER(i915));
}

void intel_gt_init_workarounds(struct drm_i915_private *i915)
{
	struct i915_wa_list *wal = &i915->gt_wa_list;

	wa_init_start(wal, "GT", "global");
	gt_init_workarounds(i915, wal);
	wa_init_finish(wal);
}

static enum forcewake_domains
wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
{
	enum forcewake_domains fw = 0;
	struct i915_wa *wa;
	unsigned int i;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		fw |= intel_uncore_forcewake_for_reg(uncore,
						     wa->reg,
						     FW_REG_READ |
						     FW_REG_WRITE);

	return fw;
}

static bool
wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
{
	if ((cur ^ wa->set) & wa->read) {
		DRM_ERROR("%s workaround lost on %s! (reg[%x]=0x%x, relevant bits were 0x%x vs expected 0x%x)\n",
			  name, from, i915_mmio_reg_offset(wa->reg),
			  cur, cur & wa->read, wa->set & wa->read);

		return false;
	}

	return true;
}

static void
wa_list_apply(struct intel_gt *gt, const struct i915_wa_list *wal)
{
	struct intel_uncore *uncore = gt->uncore;
	enum forcewake_domains fw;
	unsigned long flags;
	struct i915_wa *wa;
	unsigned int i;

	if (!wal->count)
		return;

	fw = wal_get_fw_for_rmw(uncore, wal);

	spin_lock_irqsave(&uncore->lock, flags);
	intel_uncore_forcewake_get__locked(uncore, fw);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		u32 val, old = 0;

		/* open-coded rmw due to steering */
		old = wa->clr ? intel_gt_read_register_fw(gt, wa->reg) : 0;
		val = (old & ~wa->clr) | wa->set;
		if (val != old || !wa->clr)
			intel_uncore_write_fw(uncore, wa->reg, val);

		if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
			wa_verify(wa, intel_gt_read_register_fw(gt, wa->reg),
				  wal->name, "application");
	}

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock_irqrestore(&uncore->lock, flags);
}

void intel_gt_apply_workarounds(struct intel_gt *gt)
{
	wa_list_apply(gt, &gt->i915->gt_wa_list);
}

static bool wa_list_verify(struct intel_gt *gt,
			   const struct i915_wa_list *wal,
			   const char *from)
{
	struct intel_uncore *uncore = gt->uncore;
	struct i915_wa *wa;
	enum forcewake_domains fw;
	unsigned long flags;
	unsigned int i;
	bool ok = true;

	fw = wal_get_fw_for_rmw(uncore, wal);

	spin_lock_irqsave(&uncore->lock, flags);
	intel_uncore_forcewake_get__locked(uncore, fw);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		ok &= wa_verify(wa,
				intel_gt_read_register_fw(gt, wa->reg),
				wal->name, from);

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock_irqrestore(&uncore->lock, flags);

	return ok;
}

bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
{
	return wa_list_verify(gt, &gt->i915->gt_wa_list, from);
}

__maybe_unused
static bool is_nonpriv_flags_valid(u32 flags)
{
	/* Check only valid flag bits are set */
	if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID)
		return false;

	/* NB: Only 3 out of 4 enum values are valid for access field */
	if ((flags & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	    RING_FORCE_TO_NONPRIV_ACCESS_INVALID)
		return false;

	return true;
}

static void
whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
{
	struct i915_wa wa = {
		.reg = reg
	};

	if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
		return;

	if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))
		return;

	wa.reg.reg |= flags;
	_wa_add(wal, &wa);
}

static void
whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
{
	whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);
}

static void gen9_whitelist_build(struct i915_wa_list *w)
{
	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
	whitelist_reg(w, GEN9_CTX_PREEMPT_REG);

	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
	whitelist_reg(w, GEN8_CS_CHICKEN1);

	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
	whitelist_reg(w, GEN8_HDC_CHICKEN1);

	/* WaSendPushConstantsFromMMIO:skl,bxt */
	whitelist_reg(w, COMMON_SLICE_CHICKEN2);
}

static void skl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/* WaDisableLSQCROPERFforOCL:skl */
	whitelist_reg(w, GEN8_L3SQCREG4);
}

static void bxt_whitelist_build(struct intel_engine_cs *engine)
{
	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(&engine->whitelist);
}

static void kbl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/* WaDisableLSQCROPERFforOCL:kbl */
	whitelist_reg(w, GEN8_L3SQCREG4);
}

static void glk_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
	whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
}

static void cfl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/*
	 * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml
	 *
	 * This covers 4 registers which are next to one another:
	 *   - PS_INVOCATION_COUNT
	 *   - PS_INVOCATION_COUNT_UDW
	 *   - PS_DEPTH_COUNT
	 *   - PS_DEPTH_COUNT_UDW
	 */
	whitelist_reg_ext(w, PS_INVOCATION_COUNT,
			  RING_FORCE_TO_NONPRIV_ACCESS_RD |
			  RING_FORCE_TO_NONPRIV_RANGE_4);
}

static void cml_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		whitelist_reg_ext(w,
				  RING_CTX_TIMESTAMP(engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);

	cfl_whitelist_build(engine);
}

static void icl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	switch (engine->class) {
	case RENDER_CLASS:
		/* WaAllowUMDToModifyHalfSliceChicken7:icl */
		whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);

		/* WaAllowUMDToModifySamplerMode:icl */
		whitelist_reg(w, GEN10_SAMPLER_MODE);

		/* WaEnableStateCacheRedirectToCS:icl */
		whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);

		/*
		 * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl
		 *
		 * This covers 4 registers which are next to one another:
		 *   - PS_INVOCATION_COUNT
		 *   - PS_INVOCATION_COUNT_UDW
		 *   - PS_DEPTH_COUNT
		 *   - PS_DEPTH_COUNT_UDW
		 */
		whitelist_reg_ext(w, PS_INVOCATION_COUNT,
				  RING_FORCE_TO_NONPRIV_ACCESS_RD |
				  RING_FORCE_TO_NONPRIV_RANGE_4);
		break;

	case VIDEO_DECODE_CLASS:
		/* hucStatusRegOffset */
		whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		/* hucUKernelHdrInfoRegOffset */
		whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		/* hucStatus2RegOffset */
		whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		whitelist_reg_ext(w,
				  RING_CTX_TIMESTAMP(engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		break;

	default:
		whitelist_reg_ext(w,
				  RING_CTX_TIMESTAMP(engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		break;
	}
}

static void tgl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	switch (engine->class) {
	case RENDER_CLASS:
		/*
		 * WaAllowPMDepthAndInvocationCountAccessFromUMD:tgl
		 * Wa_1408556865:tgl
		 *
		 * This covers 4 registers which are next to one another:
		 *   - PS_INVOCATION_COUNT
		 *   - PS_INVOCATION_COUNT_UDW
		 *   - PS_DEPTH_COUNT
		 *   - PS_DEPTH_COUNT_UDW
		 */
		whitelist_reg_ext(w, PS_INVOCATION_COUNT,
				  RING_FORCE_TO_NONPRIV_ACCESS_RD |
				  RING_FORCE_TO_NONPRIV_RANGE_4);

		/* Wa_1808121037:tgl */
		whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);

		/* Wa_1806527549:tgl */
		whitelist_reg(w, HIZ_CHICKEN);
		break;
	default:
		whitelist_reg_ext(w,
				  RING_CTX_TIMESTAMP(engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		break;
	}
}

static void dg1_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	tgl_whitelist_build(engine);

	/* GEN:BUG:1409280441:dg1 */
	if (IS_DG1_GT_STEP(engine->i915, STEP_A0, STEP_B0) &&
	    (engine->class == RENDER_CLASS ||
	     engine->class == COPY_ENGINE_CLASS))
		whitelist_reg_ext(w, RING_ID(engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
}

void intel_engine_init_whitelist(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_wa_list *w = &engine->whitelist;

	wa_init_start(w, "whitelist", engine->name);

	if (IS_DG1(i915))
		dg1_whitelist_build(engine);
	else if (GRAPHICS_VER(i915) == 12)
		tgl_whitelist_build(engine);
	else if (GRAPHICS_VER(i915) == 11)
		icl_whitelist_build(engine);
	else if (IS_COMETLAKE(i915))
		cml_whitelist_build(engine);
	else if (IS_COFFEELAKE(i915))
		cfl_whitelist_build(engine);
	else if (IS_GEMINILAKE(i915))
		glk_whitelist_build(engine);
	else if (IS_KABYLAKE(i915))
		kbl_whitelist_build(engine);
	else if (IS_BROXTON(i915))
		bxt_whitelist_build(engine);
	else if (IS_SKYLAKE(i915))
		skl_whitelist_build(engine);
	else if (GRAPHICS_VER(i915) <= 8)
		;
	else
		MISSING_CASE(GRAPHICS_VER(i915));

	wa_init_finish(w);
}

void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
{
	const struct i915_wa_list *wal = &engine->whitelist;
	struct intel_uncore *uncore = engine->uncore;
	const u32 base = engine->mmio_base;
	struct i915_wa *wa;
	unsigned int i;

	if (!wal->count)
		return;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		intel_uncore_write(uncore,
				   RING_FORCE_TO_NONPRIV(base, i),
				   i915_mmio_reg_offset(wa->reg));

	/* And clear the rest just in case of garbage */
	for (; i < RING_MAX_NONPRIV_SLOTS; i++)
		intel_uncore_write(uncore,
				   RING_FORCE_TO_NONPRIV(base, i),
				   i915_mmio_reg_offset(RING_NOPID(base)));
}
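
/*
 * Illustrative slot layout, assuming a two-entry whitelist on an engine
 * with mmio base B: RING_FORCE_TO_NONPRIV(B, 0) and (B, 1) receive the two
 * whitelisted register offsets (including the access flags OR'ed into the
 * offset by whitelist_reg_ext()), while slots 2 through
 * RING_MAX_NONPRIV_SLOTS - 1 are parked on the harmless RING_NOPID(B)
 * offset.
 */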

1502 1503
static void
rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
1504 1505 1506
{
	struct drm_i915_private *i915 = engine->i915;

1507 1508
	if (IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0) ||
	    IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0)) {
1509
		/*
1510 1511
		 * Wa_1607138336:tgl[a0],dg1[a0]
		 * Wa_1607063988:tgl[a0],dg1[a0]
1512
		 */
M
Mika Kuoppala 已提交
1513 1514 1515
		wa_write_or(wal,
			    GEN9_CTX_PREEMPT_REG,
			    GEN12_DISABLE_POSH_BUSY_FF_DOP_CG);
1516
	}
1517

1518
	if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0)) {
R
Radhakrishna Sripada 已提交
1519 1520 1521 1522 1523 1524 1525
		/*
		 * Wa_1606679103:tgl
		 * (see also Wa_1606682166:icl)
		 */
		wa_write_or(wal,
			    GEN7_SARCHKMD,
			    GEN7_DISABLE_SAMPLER_PREFETCH);
1526 1527
	}

1528
	if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) || IS_DG1(i915) ||
1529
	    IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
1530
		/* Wa_1606931601:tgl,rkl,dg1,adl-s,adl-p */
1531 1532
		wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ);

1533 1534 1535 1536
		/*
		 * Wa_1407928979:tgl A*
		 * Wa_18011464164:tgl[B0+],dg1[B0+]
		 * Wa_22010931296:tgl[B0+],dg1[B0+]
1537
		 * Wa_14010919138:rkl,dg1,adl-s,adl-p
1538 1539 1540
		 */
		wa_write_or(wal, GEN7_FF_THREAD_MODE,
			    GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
1541 1542

		/*
1543 1544 1545
		 * Wa_1606700617:tgl,dg1,adl-p
		 * Wa_22010271021:tgl,rkl,dg1,adl-s,adl-p
		 * Wa_14010826681:tgl,dg1,rkl,adl-p
1546 1547 1548 1549
		 */
		wa_masked_en(wal,
			     GEN9_CS_DEBUG_MODE1,
			     FF_DOP_CLOCK_GATE_DISABLE);
1550 1551
	}

1552
	if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
1553
	    IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0) ||
1554
	    IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
1555
		/* Wa_1409804808:tgl,rkl,dg1[a0],adl-s,adl-p */
1556 1557
		wa_masked_en(wal, GEN7_ROW_CHICKEN2,
			     GEN12_PUSH_CONST_DEREF_HOLD_DIS);
1558

1559 1560
		/*
		 * Wa_1409085225:tgl
1561
		 * Wa_14010229206:tgl,rkl,dg1[a0],adl-s,adl-p
1562 1563
		 */
		wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
1564 1565
	}

1566

1567
	if (IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0) ||
1568
	    IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
1569 1570 1571
		/*
		 * Wa_1607030317:tgl
		 * Wa_1607186500:tgl
1572 1573 1574 1575 1576 1577
		 * Wa_1607297627:tgl,rkl,dg1[a0]
		 *
		 * On TGL and RKL there are multiple entries for this WA in the
		 * BSpec; some indicate this is an A0-only WA, others indicate
		 * it applies to all steppings so we trust the "all steppings."
		 * For DG1 this only applies to A0.
1578 1579 1580 1581 1582
		 */
		wa_masked_en(wal,
			     GEN6_RC_SLEEP_PSMI_CONTROL,
			     GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
			     GEN8_RC_SEMA_IDLE_MSG_DISABLE);
1583 1584
	}

1585
	if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) ||
1586 1587
	    IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) {
		/* Wa_1406941453:tgl,rkl,dg1,adl-s,adl-p */
1588 1589 1590 1591 1592
		wa_masked_en(wal,
			     GEN10_SAMPLER_MODE,
			     ENABLE_SMALLPL);
	}

1593
	if (GRAPHICS_VER(i915) == 11) {
1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610
		/* This is not an Wa. Enable for better image quality */
		wa_masked_en(wal,
			     _3D_CHICKEN3,
			     _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);

		/*
		 * Wa_1405543622:icl
		 * Formerly known as WaGAPZPriorityScheme
		 */
		wa_write_or(wal,
			    GEN8_GARBCNTL,
			    GEN11_ARBITRATION_PRIO_ORDER_MASK);

		/*
		 * Wa_1604223664:icl
		 * Formerly known as WaL3BankAddressHashing
		 */
1611 1612 1613 1614 1615 1616 1617 1618
		wa_write_clr_set(wal,
				 GEN8_GARBCNTL,
				 GEN11_HASH_CTRL_EXCL_MASK,
				 GEN11_HASH_CTRL_EXCL_BIT0);
		wa_write_clr_set(wal,
				 GEN11_GLBLINVL,
				 GEN11_BANK_HASH_ADDR_EXCL_MASK,
				 GEN11_BANK_HASH_ADDR_EXCL_BIT0);
1619 1620 1621 1622 1623

		/*
		 * Wa_1405733216:icl
		 * Formerly known as WaDisableCleanEvicts
		 */
1624 1625 1626
		wa_write_or(wal,
			    GEN8_L3SQCREG4,
			    GEN11_LQSC_CLEAN_EVICT_DISABLE);
1627

1628 1629 1630 1631
		/* Wa_1606682166:icl */
		wa_write_or(wal,
			    GEN7_SARCHKMD,
			    GEN7_DISABLE_SAMPLER_PREFETCH);
T
Tvrtko Ursulin 已提交
1632 1633

		/* Wa_1409178092:icl */
1634 1635 1636 1637
		wa_write_clr_set(wal,
				 GEN11_SCRATCH2,
				 GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
				 0);
1638 1639 1640 1641 1642 1643 1644 1645 1646

		/* WaEnable32PlaneMode:icl */
		wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
			     GEN11_ENABLE_32_PLANE_MODE);

		/*
		 * Wa_1408615072:icl,ehl  (vsunit)
		 * Wa_1407596294:icl,ehl  (hsunit)
		 */
1647 1648
		wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
			    VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
1649 1650

		/* Wa_1407352427:icl,ehl */
1651 1652
		wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
			    PSDUNIT_CLKGATE_DIS);
1653 1654 1655 1656 1657

		/* Wa_1406680159:icl,ehl */
		wa_write_or(wal,
			    SUBSLICE_UNIT_LEVEL_CLKGATE,
			    GWUNIT_CLKGATE_DIS);
1658 1659 1660 1661 1662 1663 1664 1665

		/*
		 * Wa_1408767742:icl[a2..forever],ehl[all]
		 * Wa_1605460711:icl[a0..c0]
		 */
		wa_write_or(wal,
			    GEN7_FF_THREAD_MODE,
			    GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
M
Matt Atwood 已提交
1666

1667 1668 1669 1670
		/* Wa_22010271021 */
		wa_masked_en(wal,
			     GEN9_CS_DEBUG_MODE1,
			     FF_DOP_CLOCK_GATE_DISABLE);
1671 1672
	}

1673
	if (IS_GRAPHICS_VER(i915, 9, 12)) {
1674
		/* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl,tgl */
1675 1676 1677 1678 1679
		wa_masked_en(wal,
			     GEN7_FF_SLICE_CS_CHICKEN1,
			     GEN9_FFSC_PERCTX_PREEMPT_CTRL);
	}

1680 1681 1682 1683
	if (IS_SKYLAKE(i915) ||
	    IS_KABYLAKE(i915) ||
	    IS_COFFEELAKE(i915) ||
	    IS_COMETLAKE(i915)) {
1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696
		/* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
		wa_write_or(wal,
			    GEN8_GARBCNTL,
			    GEN9_GAPS_TSV_CREDIT_DISABLE);
	}

	if (IS_BROXTON(i915)) {
		/* WaDisablePooledEuLoadBalancingFix:bxt */
		wa_masked_en(wal,
			     FF_SLICE_CS_CHICKEN2,
			     GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
	}

1697
	if (GRAPHICS_VER(i915) == 9) {
1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709
		/* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
		wa_masked_en(wal,
			     GEN9_CSFE_CHICKEN1_RCS,
			     GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);

		/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
		wa_write_or(wal,
			    BDW_SCRATCH1,
			    GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

		/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
		if (IS_GEN9_LP(i915))
1710 1711 1712 1713 1714
			wa_write_clr_set(wal,
					 GEN8_L3SQCREG1,
					 L3_PRIO_CREDITS_MASK,
					 L3_GENERAL_PRIO_CREDITS(62) |
					 L3_HIGH_PRIO_CREDITS(2));
1715 1716 1717 1718 1719

		/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
		wa_write_or(wal,
			    GEN8_L3SQCREG4,
			    GEN8_LQSC_FLUSH_COHERENT_LINES);
1720 1721 1722 1723 1724 1725 1726 1727

		/* Disable atomics in L3 to prevent unrecoverable hangs */
		wa_write_clr_set(wal, GEN9_SCRATCH_LNCF1,
				 GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE, 0);
		wa_write_clr_set(wal, GEN8_L3SQCREG4,
				 GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE, 0);
		wa_write_clr_set(wal, GEN9_SCRATCH1,
				 EVICTION_PERF_FIX_ENABLE, 0);
1728
	}
1729

1730 1731 1732 1733 1734 1735 1736 1737 1738
	if (IS_HASWELL(i915)) {
		/* WaSampleCChickenBitEnable:hsw */
		wa_masked_en(wal,
			     HALF_SLICE_CHICKEN3, HSW_SAMPLE_C_PERFORMANCE);

		wa_masked_dis(wal,
			      CACHE_MODE_0_GEN7,
			      /* enable HiZ Raw Stall Optimization */
			      HIZ_RAW_STALL_OPT_DISABLE);
1739 1740 1741 1742 1743 1744 1745
	}

	if (IS_VALLEYVIEW(i915)) {
		/* WaDisableEarlyCull:vlv */
		wa_masked_en(wal,
			     _3D_CHICKEN3,
			     _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
1746 1747

		/*
1748
		 * WaVSThreadDispatchOverride:ivb,vlv
1749
		 *
1750 1751
		 * This actually overrides the dispatch
		 * mode for all thread types.
1752
		 */
1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765
		wa_write_clr_set(wal,
				 GEN7_FF_THREAD_MODE,
				 GEN7_FF_SCHED_MASK,
				 GEN7_FF_TS_SCHED_HW |
				 GEN7_FF_VS_SCHED_HW |
				 GEN7_FF_DS_SCHED_HW);

		/* WaPsdDispatchEnable:vlv */
		/* WaDisablePSDDualDispatchEnable:vlv */
		wa_masked_en(wal,
			     GEN7_HALF_SLICE_CHICKEN1,
			     GEN7_MAX_PS_THREAD_DEP |
			     GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
	}

	if (IS_IVYBRIDGE(i915)) {
		/* WaDisableEarlyCull:ivb */
		wa_masked_en(wal,
			     _3D_CHICKEN3,
			     _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);

		if (0) { /* causes HiZ corruption on ivb:gt1 */
			/* enable HiZ Raw Stall Optimization */
			wa_masked_dis(wal,
				      CACHE_MODE_0_GEN7,
				      HIZ_RAW_STALL_OPT_DISABLE);
		}

		/*
		 * WaVSThreadDispatchOverride:ivb,vlv
		 *
		 * This actually overrides the dispatch
		 * mode for all thread types.
		 */
		wa_write_clr_set(wal,
				 GEN7_FF_THREAD_MODE,
				 GEN7_FF_SCHED_MASK,
				 GEN7_FF_TS_SCHED_HW |
				 GEN7_FF_VS_SCHED_HW |
				 GEN7_FF_DS_SCHED_HW);

		/* WaDisablePSDDualDispatchEnable:ivb */
		if (IS_IVB_GT1(i915))
			wa_masked_en(wal,
				     GEN7_HALF_SLICE_CHICKEN1,
				     GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
	}

	if (GRAPHICS_VER(i915) == 7) {
		/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
		wa_masked_en(wal,
			     GFX_MODE_GEN7,
			     GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE);

		/* WaDisable_RenderCache_OperationalFlush:ivb,vlv,hsw */
		wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);

		/*
		 * WaDisable4x2SubspanOptimization:ivb,hsw
		 *
		 * BSpec says this must be set even though the workaround
		 * isn't listed for VLV.
		 */
		wa_masked_en(wal,
			     CACHE_MODE_1,
			     PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);

		/*
		 * BSpec recommends 8x4 when MSAA is used,
		 * however in practice 16x4 seems fastest.
		 *
		 * Note that PS/WM thread counts depend on the WIZ hashing
		 * disable bit, which we don't touch here, but it's good
		 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
		 */
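		/*
		 * Note: GEN7_GT_MODE is a masked register, so the upper 16
		 * bits of each write select which of the lower bits latch.
		 * The final wa_add() argument is the read-back verification
		 * mask, covering only the hashing field actually written.
		 */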
		wa_add(wal, GEN7_GT_MODE, 0,
		       _MASKED_FIELD(GEN6_WIZ_HASHING_MASK,
				     GEN6_WIZ_HASHING_16x4),
		       GEN6_WIZ_HASHING_16x4);
	}

	if (IS_GRAPHICS_VER(i915, 6, 7))
		/*
		 * We need to disable the AsyncFlip performance optimisations in
		 * order to use MI_WAIT_FOR_EVENT within the CS. It should
		 * already be programmed to '1' on all products.
		 *
		 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
		 */
		wa_masked_en(wal,
			     MI_MODE,
			     ASYNC_FLIP_PERF_DISABLE);

	if (GRAPHICS_VER(i915) == 6) {
		/*
		 * Required for the hardware to program scanline values for
		 * waiting.
		 * WaEnableFlushTlbInvalidationMode:snb
		 */
		wa_masked_en(wal,
			     GFX_MODE,
			     GFX_TLB_INVALIDATE_EXPLICIT);

		/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
		wa_masked_en(wal,
			     _3D_CHICKEN,
			     _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB);

		wa_masked_en(wal,
			     _3D_CHICKEN3,
			     /* WaStripsFansDisableFastClipPerformanceFix:snb */
			     _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL |
			     /*
			      * Bspec says:
			      * "This bit must be set if 3DSTATE_CLIP clip mode is set
			      * to normal and 3DSTATE_SF number of SF output attributes
			      * is more than 16."
			      */
			     _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH);

		/*
		 * BSpec recommends 8x4 when MSAA is used,
		 * however in practice 16x4 seems fastest.
		 *
		 * Note that PS/WM thread counts depend on the WIZ hashing
		 * disable bit, which we don't touch here, but it's good
		 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
		 */
		wa_add(wal,
		       GEN6_GT_MODE, 0,
		       _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
		       GEN6_WIZ_HASHING_16x4);

		/* WaDisable_RenderCache_OperationalFlush:snb */
		wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);

		/*
		 * From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 *  policy. [...] This bit must be reset. LRA replacement
		 *  policy is not supported."
		 */
		wa_masked_dis(wal,
			      CACHE_MODE_0,
			      CM0_STC_EVICT_DISABLE_LRA_SNB);
	}

	if (IS_GRAPHICS_VER(i915, 4, 6))
		/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
		wa_add(wal, MI_MODE,
		       0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
		       /* XXX bit doesn't stick on Broadwater */
		       IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH);

	if (GRAPHICS_VER(i915) == 4)
		/*
		 * Disable CONSTANT_BUFFER before it is loaded from the context
		 * image. As soon as it is loaded, it is executed and the stored
		 * address may no longer be valid, leading to a GPU hang.
		 *
		 * This imposes the requirement that userspace reload their
		 * CONSTANT_BUFFER on every batch, fortunately a requirement
		 * they are already accustomed to from before contexts were
		 * enabled.
		 */
		wa_add(wal, ECOSKPD,
		       0, _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE),
		       0 /* XXX bit doesn't stick on Broadwater */);
}

static void
xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	/* WaKBLVECSSemaphoreWaitPoll:kbl */
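	/* The stepping bounds are since-inclusive, until-exclusive: [A0, F0). */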
	if (IS_KBL_GT_STEP(i915, STEP_A0, STEP_F0)) {
		wa_write(wal,
			 RING_SEMA_WAIT_POLL(engine->mmio_base),
			 1);
	}
}

static void
engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
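	/*
	 * Real pre-gen4 engines never get here (see the early return in
	 * intel_engine_init_workarounds()); this guards the mock engines
	 * used by the selftests.
	 */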
	if (I915_SELFTEST_ONLY(GRAPHICS_VER(engine->i915) < 4))
		return;

	if (engine->class == RENDER_CLASS)
		rcs_engine_wa_init(engine, wal);
	else
		xcs_engine_wa_init(engine, wal);
}

void intel_engine_init_workarounds(struct intel_engine_cs *engine)
{
	struct i915_wa_list *wal = &engine->wa_list;

	if (GRAPHICS_VER(engine->i915) < 4)
		return;

	wa_init_start(wal, "engine", engine->name);
	engine_init_workarounds(engine, wal);
	wa_init_finish(wal);
}

void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
{
	wa_list_apply(engine->gt, &engine->wa_list);
}

struct mcr_range {
	u32 start;
	u32 end;
};
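
/* Each table below is terminated by a zeroed sentinel entry. */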

static const struct mcr_range mcr_ranges_gen8[] = {
	{ .start = 0x5500, .end = 0x55ff },
	{ .start = 0x7000, .end = 0x7fff },
	{ .start = 0x9400, .end = 0x97ff },
	{ .start = 0xb000, .end = 0xb3ff },
	{ .start = 0xe000, .end = 0xe7ff },
	{},
};

static const struct mcr_range mcr_ranges_gen12[] = {
	{ .start =  0x8150, .end =  0x815f },
	{ .start =  0x9520, .end =  0x955f },
	{ .start =  0xb100, .end =  0xb3ff },
	{ .start =  0xde80, .end =  0xe8ff },
	{ .start = 0x24a00, .end = 0x24a7f },
	{},
};

static const struct mcr_range mcr_ranges_xehp[] = {
	{ .start =  0x4000, .end =  0x4aff },
	{ .start =  0x5200, .end =  0x52ff },
	{ .start =  0x5400, .end =  0x7fff },
	{ .start =  0x8140, .end =  0x815f },
	{ .start =  0x8c80, .end =  0x8dff },
	{ .start =  0x94d0, .end =  0x955f },
	{ .start =  0x9680, .end =  0x96ff },
	{ .start =  0xb000, .end =  0xb3ff },
	{ .start =  0xc800, .end =  0xcfff },
	{ .start =  0xd800, .end =  0xd8ff },
	{ .start =  0xdc00, .end =  0xffff },
	{ .start = 0x17000, .end = 0x17fff },
	{ .start = 0x24a00, .end = 0x24a7f },
	{},
};

static bool mcr_range(struct drm_i915_private *i915, u32 offset)
{
	const struct mcr_range *mcr_ranges;
	int i;

	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
		mcr_ranges = mcr_ranges_xehp;
	else if (GRAPHICS_VER(i915) >= 12)
		mcr_ranges = mcr_ranges_gen12;
	else if (GRAPHICS_VER(i915) >= 8)
		mcr_ranges = mcr_ranges_gen8;
	else
		return false;

	/*
	 * Registers in these ranges are affected by the MCR selector
	 * which only controls CPU initiated MMIO. Routing does not
	 * work for CS access so we cannot verify them on this path.
	 */
	for (i = 0; mcr_ranges[i].start; i++)
		if (offset >= mcr_ranges[i].start &&
		    offset <= mcr_ranges[i].end)
			return true;

	return false;
}

static int
wa_list_srm(struct i915_request *rq,
	    const struct i915_wa_list *wal,
	    struct i915_vma *vma)
{
	struct drm_i915_private *i915 = rq->engine->i915;
	unsigned int i, count = 0;
	const struct i915_wa *wa;
	u32 srm, *cs;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
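	/*
	 * Gen8+ takes a 64-bit GTT address, one extra dword per command;
	 * incrementing the opcode bumps its embedded dword-length field.
	 */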
	if (GRAPHICS_VER(i915) >= 8)
		srm++;

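	/*
	 * Count only the registers the CS can read back; each SRM emitted
	 * below occupies four dwords of ring space.
	 */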
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		if (!mcr_range(i915, i915_mmio_reg_offset(wa->reg)))
			count++;
	}

	cs = intel_ring_begin(rq, 4 * count);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		u32 offset = i915_mmio_reg_offset(wa->reg);

		if (mcr_range(i915, offset))
			continue;

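		/*
		 * Skipped MCR registers leave their slots in the scratch
		 * buffer unwritten, so the results stay indexed by position
		 * in wal->list.
		 */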
		*cs++ = srm;
		*cs++ = offset;
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	return 0;
}

static int engine_wa_list_verify(struct intel_context *ce,
				 const struct i915_wa_list * const wal,
				 const char *from)
{
	const struct i915_wa *wa;
	struct i915_request *rq;
	struct i915_vma *vma;
	struct i915_gem_ww_ctx ww;
	unsigned int i;
	u32 *results;
	int err;

	if (!wal->count)
		return 0;

	vma = __vm_create_scratch_for_read(&ce->engine->gt->ggtt->vm,
					   wal->count * sizeof(u32));
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_engine_pm_get(ce->engine);
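	/*
	 * All object locks are taken under a single ww acquire context,
	 * so a deadlock (-EDEADLK) unwinds to the backoff-and-retry path
	 * at err_pm below.
	 */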
	i915_gem_ww_ctx_init(&ww, false);
retry:
	err = i915_gem_object_lock(vma->obj, &ww);
	if (err == 0)
		err = intel_context_pin_ww(ce, &ww);
	if (err)
		goto err_pm;

	err = i915_vma_pin_ww(vma, &ww, 0, 0,
			      i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
	if (err)
		goto err_unpin;

	rq = i915_request_create(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_vma;
	}

	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	if (err == 0)
		err = wa_list_srm(rq, wal, vma);

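	/* Hold a reference so the request can still be waited on below. */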
	i915_request_get(rq);
	if (err)
		i915_request_set_error_once(rq, err);
	i915_request_add(rq);

	if (err)
		goto err_rq;

	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
		err = -ETIME;
		goto err_rq;
	}

	results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(results)) {
		err = PTR_ERR(results);
		goto err_rq;
	}

	err = 0;
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		if (mcr_range(rq->engine->i915, i915_mmio_reg_offset(wa->reg)))
			continue;

		if (!wa_verify(wa, results[i], wal->name, from))
			err = -ENXIO;
	}

	i915_gem_object_unpin_map(vma->obj);

err_rq:
	i915_request_put(rq);
err_vma:
	i915_vma_unpin(vma);
err_unpin:
	intel_context_unpin(ce);
err_pm:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	intel_engine_pm_put(ce->engine);
	i915_vma_put(vma);
	return err;
}

int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
				    const char *from)
{
	return engine_wa_list_verify(engine->kernel_context,
				     &engine->wa_list,
				     from);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_workarounds.c"
#endif