/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2018 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_context.h"
#include "intel_gt.h"
#include "intel_workarounds.h"

/**
 * DOC: Hardware workarounds
 *
 * This file is intended as a central place to implement most [1]_ of the
 * required workarounds for hardware to work as originally intended. They fall
 * in five basic categories depending on how/when they are applied:
 *
 * - Workarounds that touch registers that are saved/restored to/from the HW
 *   context image. The list is emitted (via Load Register Immediate commands)
 *   every time a new context is created.
 * - GT workarounds. The list of these WAs is applied whenever these registers
 *   revert to default values (on GPU reset, suspend/resume [2]_, etc..).
 * - Display workarounds. The list is applied during display clock-gating
 *   initialization.
 * - Workarounds that whitelist a privileged register, so that UMDs can manage
 *   them directly. This is just a special case of an MMIO workaround (as we
 *   write the list of these to/be-whitelisted registers to some special HW
 *   registers).
 * - Workaround batchbuffers, that get executed automatically by the hardware
 *   on every HW context restore.
 *
 * .. [1] Please notice that there are other WAs that, due to their nature,
 *    cannot be applied from a central place. Those are peppered around the rest
 *    of the code, as needed.
 *
 * .. [2] Technically, some registers are powercontext saved & restored, so they
 *    survive a suspend/resume. In practice, writing them again is not too
 *    costly and simplifies things. We can revisit this in the future.
 *
 * Layout
 * ''''''
 *
 * Keep things in this file ordered by WA type, as per the above (context, GT,
 * display, register whitelist, batchbuffer). Then, inside each type, keep the
 * following order:
 *
 * - Infrastructure functions and macros
 * - WAs per platform in standard gen/chrono order
 * - Public functions to init or apply the given workaround type.
 */

static void wa_init_start(struct i915_wa_list *wal, const char *name)
{
	wal->name = name;
}

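/*
 * Workaround lists are (re)allocated in chunks of this many entries;
 * wa_init_finish() trims off any unused tail.
 */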
#define WA_LIST_CHUNK (1 << 4)

static void wa_init_finish(struct i915_wa_list *wal)
{
	/* Trim unused entries. */
	if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
		struct i915_wa *list = kmemdup(wal->list,
					       wal->count * sizeof(*list),
					       GFP_KERNEL);

		if (list) {
			kfree(wal->list);
			wal->list = list;
		}
	}

	if (!wal->count)
		return;

	DRM_DEBUG_DRIVER("Initialized %u %s workarounds\n",
			 wal->wa_count, wal->name);
}

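/*
 * Add a workaround to the list, growing the backing array in WA_LIST_CHUNK
 * steps as needed. The list is kept sorted by mmio offset; if the register
 * is already present, the new mask/value is merged into the existing entry.
 */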
static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
{
	unsigned int addr = i915_mmio_reg_offset(wa->reg);
	unsigned int start = 0, end = wal->count;
	const unsigned int grow = WA_LIST_CHUNK;
	struct i915_wa *wa_;

	GEM_BUG_ON(!is_power_of_2(grow));

	if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
		struct i915_wa *list;

		list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
				     GFP_KERNEL);
		if (!list) {
			DRM_ERROR("No space for workaround init!\n");
			return;
		}

		if (wal->list)
			memcpy(list, wal->list, sizeof(*wa) * wal->count);

		wal->list = list;
	}

	while (start < end) {
		unsigned int mid = start + (end - start) / 2;

		if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) {
			start = mid + 1;
		} else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) {
			end = mid;
		} else {
			wa_ = &wal->list[mid];

			if ((wa->mask & ~wa_->mask) == 0) {
				DRM_ERROR("Discarding overwritten w/a for reg %04x (mask: %08x, value: %08x)\n",
					  i915_mmio_reg_offset(wa_->reg),
					  wa_->mask, wa_->val);

				wa_->val &= ~wa->mask;
			}

			wal->wa_count++;
			wa_->val |= wa->val;
			wa_->mask |= wa->mask;
			wa_->read |= wa->read;
			return;
		}
	}

	wal->wa_count++;
	wa_ = &wal->list[wal->count++];
	*wa_ = *wa;

	while (wa_-- > wal->list) {
		GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) ==
			   i915_mmio_reg_offset(wa_[1].reg));
		if (i915_mmio_reg_offset(wa_[1].reg) >
		    i915_mmio_reg_offset(wa_[0].reg))
			break;

		swap(wa_[1], wa_[0]);
	}
}

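/*
 * Record a read-modify-write workaround: the bits in @mask are cleared and
 * then @val is OR'ed in; @mask is also remembered as the set of bits to
 * check when the workaround is later verified.
 */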
static void
wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
		   u32 val)
{
	struct i915_wa wa = {
		.reg  = reg,
		.mask = mask,
		.val  = val,
		.read = mask,
	};

	_wa_add(wal, &wa);
}

static void
wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
	wa_write_masked_or(wal, reg, val, _MASKED_BIT_ENABLE(val));
}

static void
wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
	wa_write_masked_or(wal, reg, ~0, val);
}

static void
wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
	wa_write_masked_or(wal, reg, val, val);
}

static void
ignore_wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, u32 val)
{
	struct i915_wa wa = {
		.reg  = reg,
		.mask = mask,
		.val  = val,
		/* Bonkers HW, skip verifying */
	};

	_wa_add(wal, &wa);
}

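/*
 * Shorthand for masked-write registers, where the upper 16 bits act as a
 * write-enable mask for the lower 16. These macros expect an i915_wa_list
 * named "wal" in the enclosing scope.
 */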
#define WA_SET_BIT_MASKED(addr, mask) \
	wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_ENABLE(mask))

#define WA_CLR_BIT_MASKED(addr, mask) \
	wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_DISABLE(mask))

#define WA_SET_FIELD_MASKED(addr, mask, value) \
	wa_write_masked_or(wal, (addr), (mask), _MASKED_FIELD((mask), (value)))

static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisableAsyncFlipPerfMode:bdw,chv */
	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);

	/* WaDisablePartialInstShootdown:bdw,chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:bdw,chv */
	/* WaHdcDisableFetchWhenMasked:bdw,chv */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_DONOT_FETCH_MEM_WHEN_MASKED |
			  HDC_FORCE_NON_COHERENT);

	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
	 *  polygons in the same 8x4 pixel/sample area to be processed without
	 *  stalling waiting for the earlier ones to write to Hierarchical Z
	 *  buffer."
	 *
	 * This optimization is off by default for BDW and CHV; turn it on.
	 */
	WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw,chv */
	WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN6_WIZ_HASHING_MASK,
			    GEN6_WIZ_HASHING_16x4);
}

static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	gen8_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* WaDisableDopClockGating:bdw
	 *
	 * Also see the related UCGTCL1 write in broadwell_init_clock_gating()
	 * to disable EUTC clock gating.
	 */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
			  DOP_CLOCK_GATING_DISABLE);

	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN8_SAMPLER_POWER_BYPASS_DIS);

	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  /* WaForceContextSaveRestoreNonCoherent:bdw */
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
			  (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
}

static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen8_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* Improve HiZ throughput on CHV. */
	WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
}

static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	if (HAS_LLC(i915)) {
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
		 *
		 * Must match Display Engine. See
		 * WaCompressedResourceDisplayNewHashMode.
		 */
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN9_PBE_COMPRESSED_HASH_SELECTION);
		WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
				  GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
	}

	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  FLOW_CONTROL_ENABLE |
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Syncing dependencies between camera and graphics:skl,bxt,kbl */
	if (!IS_COFFEELAKE(i915))
		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
				  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);

	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
			  GEN9_ENABLE_YV12_BUGFIX |
			  GEN9_ENABLE_GPGPU_PREEMPTION);

	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(CACHE_MODE_1,
			  GEN8_4x4_STC_OPTIMIZATION_DISABLE |
			  GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);

	/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
	WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
			  GEN9_CCS_TLB_PREFETCH_ENABLE);

	/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);

	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
	 * both tied to WaForceContextSaveRestoreNonCoherent
	 * in some hsds for skl. We keep the tie for all gen9. The
	 * documentation is a bit hazy and so we want to get common behaviour,
	 * even though there is no clear evidence we would need both on kbl/bxt.
	 * This area has been source of system hangs so we play it safe
	 * and mimic the skl regardless of what bspec says.
	 *
	 * Use Force Non-Coherent whenever executing a 3D context. This
	 * is a workaround for a possible hang in the unlikely event
	 * a TLB invalidation occurs during a PSD flush.
	 */

	/* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_NON_COHERENT);

	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
	if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915))
		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
				  GEN8_SAMPLER_POWER_BYPASS_DIS);

	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

	/*
	 * Supporting preemption with fine-granularity requires changes in the
	 * batch buffer programming. Since we can't break old userspace, we
	 * need to set our default preemption level to safe value. Userspace is
	 * still able to use more fine-grained preemption levels, since in
	 * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
	 * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
	 * not real HW workarounds, but merely a way to start using preemption
	 * while maintaining old contract with userspace.
	 */

	/* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

	/* WaDisableGPGPUMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

	/* WaClearHIZ_WM_CHICKEN3:bxt,glk */
	if (IS_GEN9_LP(i915))
		WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
}

static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
				struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;
	u8 vals[3] = { 0, 0, 0 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		u8 ss;

		/*
		 * Only consider slices where one, and only one, subslice has 7
		 * EUs
		 */
		if (!is_power_of_2(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]))
			continue;

		/*
		 * subslice_7eu[i] != 0 (because of the check above) and
		 * ss_max == 4 (maximum number of subslices possible per slice)
		 *
		 * ->    0 <= ss <= 3;
		 */
		ss = ffs(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]) - 1;
		vals[i] = 3 - ss;
	}

	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
		return;

	/* Tune IZ hashing. See intel_device_info_runtime_init() */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN9_IZ_HASHING_MASK(2) |
			    GEN9_IZ_HASHING_MASK(1) |
			    GEN9_IZ_HASHING_MASK(0),
			    GEN9_IZ_HASHING(2, vals[2]) |
			    GEN9_IZ_HASHING(1, vals[1]) |
			    GEN9_IZ_HASHING(0, vals[0]));
}

static void skl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);
	skl_tune_iz_hashing(engine, wal);
}

static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:bxt */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  STALL_DOP_GATING_DISABLE);

	/* WaToEnableHwFixForPushConstHWBug:bxt */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}

static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:kbl */
	if (IS_KBL_REVID(i915, KBL_REVID_C0, REVID_FOREVER))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableSbeCacheDispatchPortSharing:kbl */
	WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
			  GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}

static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:glk */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}

static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:cfl */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableSbeCacheDispatchPortSharing:cfl */
	WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
			  GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}

static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	/* WaForceContextSaveRestoreNonCoherent:cnl */
	WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);

	/* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */
	if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
		WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);

	/* WaDisableReplayBufferBankArbitrationOptimization:cnl */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
	if (IS_CNL_REVID(i915, 0, CNL_REVID_B0))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);

	/* WaPushConstantDereferenceHoldDisable:cnl */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);

	/* FtrEnableFastAnisoL1BankingFix:cnl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);

	/* WaDisable3DMidCmdPreemption:cnl */
	WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

	/* WaDisableGPGPUMidCmdPreemption:cnl */
	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

	/* WaDisableEarlyEOT:cnl */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
}

static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	/* WaDisableBankHangMode:icl */
	wa_write(wal,
		 GEN8_L3CNTLREG,
		 intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) |
		 GEN8_ERRDETBCTRL);

	/* Wa_1604370585:icl (pre-prod)
	 * Formerly known as WaPushConstantDereferenceHoldDisable
	 */
	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
		WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
				  PUSH_CONSTANT_DEREF_DISABLE);

	/* WaForceEnableNonCoherent:icl
	 * This is not the same workaround as in early Gen9 platforms, where
	 * lacking this could cause system hangs, but coherency performance
	 * overhead is high and only a few compute workloads really need it
	 * (the register is whitelisted in hardware now, so UMDs can opt in
	 * for coherency if they have a good reason).
	 */
	WA_SET_BIT_MASKED(ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);

	/* Wa_2006611047:icl (pre-prod)
	 * Formerly known as WaDisableImprovedTdlClkGating
	 */
	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
		WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
				  GEN11_TDL_CLOCK_GATING_FIX_DISABLE);

	/* Wa_2006665173:icl (pre-prod) */
	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
		WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
				  GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);

	/* WaEnableFloatBlendOptimization:icl */
	wa_write_masked_or(wal,
			   GEN10_CACHE_MODE_SS,
			   0, /* write-only, so skip validation */
			   _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE));

	/* WaDisableGPGPUMidThreadPreemption:icl */
	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);

	/* allow headerless messages for preemptible GPGPU context */
	WA_SET_BIT_MASKED(GEN10_SAMPLER_MODE,
			  GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
}

static void
__intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
			   struct i915_wa_list *wal,
			   const char *name)
{
	struct drm_i915_private *i915 = engine->i915;

	if (engine->class != RENDER_CLASS)
		return;

	wa_init_start(wal, name);

	if (IS_GEN(i915, 11))
		icl_ctx_workarounds_init(engine, wal);
	else if (IS_CANNONLAKE(i915))
		cnl_ctx_workarounds_init(engine, wal);
	else if (IS_COFFEELAKE(i915))
		cfl_ctx_workarounds_init(engine, wal);
	else if (IS_GEMINILAKE(i915))
		glk_ctx_workarounds_init(engine, wal);
	else if (IS_KABYLAKE(i915))
		kbl_ctx_workarounds_init(engine, wal);
	else if (IS_BROXTON(i915))
		bxt_ctx_workarounds_init(engine, wal);
	else if (IS_SKYLAKE(i915))
		skl_ctx_workarounds_init(engine, wal);
	else if (IS_CHERRYVIEW(i915))
		chv_ctx_workarounds_init(engine, wal);
	else if (IS_BROADWELL(i915))
		bdw_ctx_workarounds_init(engine, wal);
	else if (INTEL_GEN(i915) < 8)
		return;
	else
		MISSING_CASE(INTEL_GEN(i915));

	wa_init_finish(wal);
}

void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
{
	__intel_engine_init_ctx_wa(engine, &engine->ctx_wa_list, "context");
}

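/*
 * Emit the engine's context workaround list into the request's ring as a
 * single MI_LOAD_REGISTER_IMM block, bracketed by flushes on either side.
 */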
int intel_engine_emit_ctx_wa(struct i915_request *rq)
{
	struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
	struct i915_wa *wa;
	unsigned int i;
	u32 *cs;
	int ret;

	if (wal->count == 0)
		return 0;

	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
	if (ret)
		return ret;

	cs = intel_ring_begin(rq, (wal->count * 2 + 2));
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(wal->count);
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		*cs++ = i915_mmio_reg_offset(wa->reg);
		*cs++ = wa->val;
	}
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}

static void
gen9_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	/* WaDisableKillLogic:bxt,skl,kbl */
	if (!IS_COFFEELAKE(i915))
		wa_write_or(wal,
			    GAM_ECOCHK,
			    ECOCHK_DIS_TLB);

	if (HAS_LLC(i915)) {
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
		 *
		 * Must match Display Engine. See
		 * WaCompressedResourceDisplayNewHashMode.
		 */
		wa_write_or(wal,
			    MMCD_MISC_CTRL,
			    MMCD_PCLA | MMCD_HOTSPOT_EN);
	}

	/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
	wa_write_or(wal,
		    GAM_ECOCHK,
		    BDW_DISABLE_HDC_INVALIDATION);
}

static void
skl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);

	/* WaDisableGafsUnitClkGating:skl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:skl */
	if (IS_SKL_REVID(i915, SKL_REVID_H0, REVID_FOREVER))
		wa_write_or(wal,
			    GEN9_GAMT_ECO_REG_RW_IA,
			    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
bxt_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);

	/* WaInPlaceDecompressionHang:bxt */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
kbl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);

	/* WaDisableDynamicCreditSharing:kbl */
	if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
		wa_write_or(wal,
			    GAMT_CHKN_BIT_REG,
			    GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);

	/* WaDisableGafsUnitClkGating:kbl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:kbl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
glk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);
}

static void
cfl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);

	/* WaDisableGafsUnitClkGating:cfl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:cfl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
	u32 mcr_slice_subslice_mask;

	/*
	 * WaProgramMgsrForL3BankSpecificMmioReads: cnl,icl
	 * L3Banks could be fused off in single slice scenario. If that is
	 * the case, we might need to program MCR select to a valid L3Bank
	 * by default, to make sure we correctly read certain registers
	 * later on (in the range 0xB100 - 0xB3FF).
	 * This might be incompatible with
	 * WaProgramMgsrForCorrectSliceSpecificMmioReads.
	 * Fortunately, this should not happen in production hardware, so
	 * we only assert that this is the case (instead of implementing
	 * something more complex that requires checking the range of every
	 * MMIO read).
	 */
	if (INTEL_GEN(i915) >= 10 &&
	    is_power_of_2(sseu->slice_mask)) {
		/*
		 * read FUSE3 for enabled L3 Bank IDs, if L3 Bank matches
		 * enabled subslice, no need to redirect MCR packet
		 */
		u32 slice = fls(sseu->slice_mask);
		u32 fuse3 =
			intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3);
		u8 ss_mask = sseu->subslice_mask[slice];

		u8 enabled_mask = (ss_mask | ss_mask >>
				   GEN10_L3BANK_PAIR_COUNT) & GEN10_L3BANK_MASK;
		u8 disabled_mask = fuse3 & GEN10_L3BANK_MASK;

		/*
		 * Production silicon should have matched L3Bank and
		 * subslice enabled
		 */
		WARN_ON((enabled_mask & disabled_mask) != enabled_mask);
	}

	if (INTEL_GEN(i915) >= 11)
		mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
					  GEN11_MCR_SUBSLICE_MASK;
	else
		mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK |
					  GEN8_MCR_SUBSLICE_MASK;
	/*
	 * WaProgramMgsrForCorrectSliceSpecificMmioReads:cnl,icl
	 * Before any MMIO read into slice/subslice specific registers, MCR
	 * packet control register needs to be programmed to point to any
	 * enabled s/ss pair. Otherwise, incorrect values will be returned.
	 * This means each subsequent MMIO read will be forwarded to a
	 * specific s/ss combination, but this is OK since these registers
	 * are consistent across s/ss in almost all cases. In the rare
	 * occasions, such as INSTDONE, where this value is dependent
	 * on s/ss combo, the read should be done with read_subslice_reg.
	 */
	wa_write_masked_or(wal,
			   GEN8_MCR_SELECTOR,
			   mcr_slice_subslice_mask,
			   intel_calculate_mcr_s_ss_select(i915));
}

static void
cnl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	wa_init_mcr(i915, wal);

	/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
	if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
		wa_write_or(wal,
			    GAMT_CHKN_BIT_REG,
			    GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);

	/* WaInPlaceDecompressionHang:cnl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	wa_init_mcr(i915, wal);

	/* WaInPlaceDecompressionHang:icl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

	/* WaModifyGamTlbPartitioning:icl */
	wa_write_masked_or(wal,
			   GEN11_GACB_PERF_CTRL,
			   GEN11_HASH_CTRL_MASK,
			   GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);

	/* Wa_1405766107:icl
	 * Formerly known as WaCL2SFHalfMaxAlloc
	 */
	wa_write_or(wal,
		    GEN11_LSN_UNSLCVC,
		    GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
		    GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);

	/* Wa_220166154:icl
	 * Formerly known as WaDisCtxReload
	 */
	wa_write_or(wal,
		    GEN8_GAMW_ECO_DEV_RW_IA,
		    GAMW_ECO_DEV_CTX_RELOAD_DISABLE);

	/* Wa_1405779004:icl (pre-prod) */
	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
		wa_write_or(wal,
			    SLICE_UNIT_LEVEL_CLKGATE,
			    MSCUNIT_CLKGATE_DIS);

	/* Wa_1406680159:icl */
	wa_write_or(wal,
		    SUBSLICE_UNIT_LEVEL_CLKGATE,
		    GWUNIT_CLKGATE_DIS);

	/* Wa_1406838659:icl (pre-prod) */
	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
		wa_write_or(wal,
			    INF_UNIT_LEVEL_CLKGATE,
			    CGPSF_CLKGATE_DIS);

	/* Wa_1406463099:icl
	 * Formerly known as WaGamTlbPendError
	 */
	wa_write_or(wal,
		    GAMT_CHKN_BIT_REG,
		    GAMT_CHKN_DISABLE_L3_COH_PIPE);
}

static void
gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	if (IS_GEN(i915, 11))
		icl_gt_workarounds_init(i915, wal);
	else if (IS_CANNONLAKE(i915))
		cnl_gt_workarounds_init(i915, wal);
	else if (IS_COFFEELAKE(i915))
		cfl_gt_workarounds_init(i915, wal);
	else if (IS_GEMINILAKE(i915))
		glk_gt_workarounds_init(i915, wal);
	else if (IS_KABYLAKE(i915))
		kbl_gt_workarounds_init(i915, wal);
	else if (IS_BROXTON(i915))
		bxt_gt_workarounds_init(i915, wal);
	else if (IS_SKYLAKE(i915))
		skl_gt_workarounds_init(i915, wal);
	else if (INTEL_GEN(i915) <= 8)
		return;
	else
		MISSING_CASE(INTEL_GEN(i915));
}

void intel_gt_init_workarounds(struct drm_i915_private *i915)
{
	struct i915_wa_list *wal = &i915->gt_wa_list;

	wa_init_start(wal, "GT");
	gt_init_workarounds(i915, wal);
	wa_init_finish(wal);
}

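/*
 * Compute the union of forcewake domains needed to read and write every
 * register in the list, so the whole list can be applied under a single
 * forcewake grab.
 */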
static enum forcewake_domains
wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
{
	enum forcewake_domains fw = 0;
	struct i915_wa *wa;
	unsigned int i;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		fw |= intel_uncore_forcewake_for_reg(uncore,
						     wa->reg,
						     FW_REG_READ |
						     FW_REG_WRITE);

	return fw;
}

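/*
 * Check that the bits we care about (wa->read) still contain the value the
 * workaround wrote, and complain if the workaround has been lost.
 */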
static bool
wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
{
	if ((cur ^ wa->val) & wa->read) {
		DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x, mask=%x)\n",
			  name, from, i915_mmio_reg_offset(wa->reg),
			  cur, cur & wa->read,
			  wa->val, wa->mask);

		return false;
	}

	return true;
}

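/*
 * Apply a workaround list with read-modify-write cycles under a single
 * forcewake grab with interrupts disabled; debug builds read each register
 * back immediately to verify the write stuck.
 */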
static void
wa_list_apply(struct intel_uncore *uncore, const struct i915_wa_list *wal)
{
	enum forcewake_domains fw;
	unsigned long flags;
	struct i915_wa *wa;
	unsigned int i;

	if (!wal->count)
		return;

	fw = wal_get_fw_for_rmw(uncore, wal);

	spin_lock_irqsave(&uncore->lock, flags);
	intel_uncore_forcewake_get__locked(uncore, fw);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		intel_uncore_rmw_fw(uncore, wa->reg, wa->mask, wa->val);
		if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
			wa_verify(wa,
				  intel_uncore_read_fw(uncore, wa->reg),
				  wal->name, "application");
	}

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock_irqrestore(&uncore->lock, flags);
}

void intel_gt_apply_workarounds(struct intel_gt *gt)
{
	wa_list_apply(gt->uncore, &gt->i915->gt_wa_list);
}

static bool wa_list_verify(struct intel_uncore *uncore,
			   const struct i915_wa_list *wal,
			   const char *from)
{
	struct i915_wa *wa;
	unsigned int i;
	bool ok = true;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		ok &= wa_verify(wa,
				intel_uncore_read(uncore, wa->reg),
				wal->name, from);

	return ok;
}

bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
{
	return wa_list_verify(gt->uncore, &gt->i915->gt_wa_list, from);
}

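/*
 * Add a register to the engine whitelist, encoding the allowed access mode
 * in the low bits of the mmio offset. Only RING_MAX_NONPRIV_SLOTS entries
 * are available per engine.
 */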
static void
whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
{
	struct i915_wa wa = {
		.reg = reg
	};

	if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
		return;

	wa.reg.reg |= flags;
	_wa_add(wal, &wa);
}

static void
whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
{
	whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_RW);
}

static void gen9_whitelist_build(struct i915_wa_list *w)
{
	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
	whitelist_reg(w, GEN9_CTX_PREEMPT_REG);

	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
	whitelist_reg(w, GEN8_CS_CHICKEN1);

	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
	whitelist_reg(w, GEN8_HDC_CHICKEN1);
}

static void skl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/* WaDisableLSQCROPERFforOCL:skl */
	whitelist_reg(w, GEN8_L3SQCREG4);
}

static void bxt_whitelist_build(struct intel_engine_cs *engine)
{
	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(&engine->whitelist);
}

static void kbl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/* WaDisableLSQCROPERFforOCL:kbl */
	whitelist_reg(w, GEN8_L3SQCREG4);
}

static void glk_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
	whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
}

static void cfl_whitelist_build(struct intel_engine_cs *engine)
{
	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(&engine->whitelist);
}

static void cnl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	/* WaEnablePreemptionGranularityControlByUMD:cnl */
	whitelist_reg(w, GEN8_CS_CHICKEN1);
}

static void icl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	switch (engine->class) {
	case RENDER_CLASS:
		/* WaAllowUMDToModifyHalfSliceChicken7:icl */
		whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);

		/* WaAllowUMDToModifySamplerMode:icl */
		whitelist_reg(w, GEN10_SAMPLER_MODE);

		/* WaEnableStateCacheRedirectToCS:icl */
		whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
		break;

	case VIDEO_DECODE_CLASS:
		/* hucStatusRegOffset */
		whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_RD);
		/* hucUKernelHdrInfoRegOffset */
		whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_RD);
		/* hucStatus2RegOffset */
		whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_RD);
		break;

	default:
		break;
	}
}

void intel_engine_init_whitelist(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_wa_list *w = &engine->whitelist;

	wa_init_start(w, "whitelist");

	if (IS_GEN(i915, 11))
		icl_whitelist_build(engine);
	else if (IS_CANNONLAKE(i915))
		cnl_whitelist_build(engine);
	else if (IS_COFFEELAKE(i915))
		cfl_whitelist_build(engine);
	else if (IS_GEMINILAKE(i915))
		glk_whitelist_build(engine);
	else if (IS_KABYLAKE(i915))
		kbl_whitelist_build(engine);
	else if (IS_BROXTON(i915))
		bxt_whitelist_build(engine);
	else if (IS_SKYLAKE(i915))
		skl_whitelist_build(engine);
	else if (INTEL_GEN(i915) <= 8)
		return;
	else
		MISSING_CASE(INTEL_GEN(i915));

	wa_init_finish(w);
}

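/*
 * Program the RING_FORCE_TO_NONPRIV slots with the whitelisted registers
 * and point any remaining slots at the harmless RING_NOPID register.
 */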
void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
{
	const struct i915_wa_list *wal = &engine->whitelist;
	struct intel_uncore *uncore = engine->uncore;
	const u32 base = engine->mmio_base;
	struct i915_wa *wa;
	unsigned int i;

	if (!wal->count)
		return;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		intel_uncore_write(uncore,
				   RING_FORCE_TO_NONPRIV(base, i),
				   i915_mmio_reg_offset(wa->reg));

	/* And clear the rest just in case of garbage */
	for (; i < RING_MAX_NONPRIV_SLOTS; i++)
		intel_uncore_write(uncore,
				   RING_FORCE_TO_NONPRIV(base, i),
				   i915_mmio_reg_offset(RING_NOPID(base)));
}

static void
rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	if (IS_GEN(i915, 11)) {
		/* This is not a Wa. Enable for better image quality */
		wa_masked_en(wal,
			     _3D_CHICKEN3,
			     _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);

		/* WaPipelineFlushCoherentLines:icl */
		ignore_wa_write_or(wal,
				   GEN8_L3SQCREG4,
				   GEN8_LQSC_FLUSH_COHERENT_LINES,
				   GEN8_LQSC_FLUSH_COHERENT_LINES);

		/*
		 * Wa_1405543622:icl
		 * Formerly known as WaGAPZPriorityScheme
		 */
		wa_write_or(wal,
			    GEN8_GARBCNTL,
			    GEN11_ARBITRATION_PRIO_ORDER_MASK);

		/*
		 * Wa_1604223664:icl
		 * Formerly known as WaL3BankAddressHashing
		 */
		wa_write_masked_or(wal,
				   GEN8_GARBCNTL,
				   GEN11_HASH_CTRL_EXCL_MASK,
				   GEN11_HASH_CTRL_EXCL_BIT0);
		wa_write_masked_or(wal,
				   GEN11_GLBLINVL,
				   GEN11_BANK_HASH_ADDR_EXCL_MASK,
				   GEN11_BANK_HASH_ADDR_EXCL_BIT0);

		/*
		 * Wa_1405733216:icl
		 * Formerly known as WaDisableCleanEvicts
		 */
		ignore_wa_write_or(wal,
				   GEN8_L3SQCREG4,
				   GEN11_LQSC_CLEAN_EVICT_DISABLE,
				   GEN11_LQSC_CLEAN_EVICT_DISABLE);

		/* WaForwardProgressSoftReset:icl */
		wa_write_or(wal,
			    GEN10_SCRATCH_LNCF2,
			    PMFLUSHDONE_LNICRSDROP |
			    PMFLUSH_GAPL3UNBLOCK |
			    PMFLUSHDONE_LNEBLK);

		/* Wa_1406609255:icl (pre-prod) */
		if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
			wa_write_or(wal,
				    GEN7_SARCHKMD,
				    GEN7_DISABLE_DEMAND_PREFETCH |
				    GEN7_DISABLE_SAMPLER_PREFETCH);
	}

	if (IS_GEN_RANGE(i915, 9, 11)) {
		/* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl */
		wa_masked_en(wal,
			     GEN7_FF_SLICE_CS_CHICKEN1,
			     GEN9_FFSC_PERCTX_PREEMPT_CTRL);
	}

	if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
		/* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
		wa_write_or(wal,
			    GEN8_GARBCNTL,
			    GEN9_GAPS_TSV_CREDIT_DISABLE);
	}

	if (IS_BROXTON(i915)) {
		/* WaDisablePooledEuLoadBalancingFix:bxt */
		wa_masked_en(wal,
			     FF_SLICE_CS_CHICKEN2,
			     GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
	}

	if (IS_GEN(i915, 9)) {
		/* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
		wa_masked_en(wal,
			     GEN9_CSFE_CHICKEN1_RCS,
			     GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);

		/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
		wa_write_or(wal,
			    BDW_SCRATCH1,
			    GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

		/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
		if (IS_GEN9_LP(i915))
			wa_write_masked_or(wal,
					   GEN8_L3SQCREG1,
					   L3_PRIO_CREDITS_MASK,
					   L3_GENERAL_PRIO_CREDITS(62) |
					   L3_HIGH_PRIO_CREDITS(2));

		/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
		wa_write_or(wal,
			    GEN8_L3SQCREG4,
			    GEN8_LQSC_FLUSH_COHERENT_LINES);
	}
}

static void
xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	/* WaKBLVECSSemaphoreWaitPoll:kbl */
	if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
		wa_write(wal,
			 RING_SEMA_WAIT_POLL(engine->mmio_base),
			 1);
	}
}

static void
engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8))
		return;

	if (engine->id == RCS0)
		rcs_engine_wa_init(engine, wal);
	else
		xcs_engine_wa_init(engine, wal);
}

void intel_engine_init_workarounds(struct intel_engine_cs *engine)
{
	struct i915_wa_list *wal = &engine->wa_list;

	if (GEM_WARN_ON(INTEL_GEN(engine->i915) < 8))
		return;

	wa_init_start(wal, engine->name);
	engine_init_workarounds(engine, wal);
	wa_init_finish(wal);
}

void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
{
	wa_list_apply(engine->uncore, &engine->wa_list);
}

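/*
 * Allocate and pin a scratch buffer with room for one dword per workaround,
 * used to read the registers back via MI_STORE_REGISTER_MEM below.
 */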
static struct i915_vma *
create_scratch(struct i915_address_space *vm, int count)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned int size;
	int err;

	size = round_up(count * sizeof(u32), PAGE_SIZE);
	obj = i915_gem_object_create_internal(vm->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0,
			   i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
	if (err)
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

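/*
 * Emit one MI_STORE_REGISTER_MEM per workaround to capture the current
 * value of each register into the scratch buffer.
 */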
static int
wa_list_srm(struct i915_request *rq,
	    const struct i915_wa_list *wal,
	    struct i915_vma *vma)
{
	const struct i915_wa *wa;
	unsigned int i;
	u32 srm, *cs;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (INTEL_GEN(rq->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * wal->count);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		*cs++ = srm;
		*cs++ = i915_mmio_reg_offset(wa->reg);
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	return 0;
}

static int engine_wa_list_verify(struct intel_context *ce,
				 const struct i915_wa_list * const wal,
				 const char *from)
{
	const struct i915_wa *wa;
	struct i915_request *rq;
	struct i915_vma *vma;
	unsigned int i;
	u32 *results;
	int err;

	if (!wal->count)
		return 0;

	vma = create_scratch(&ce->engine->gt->ggtt->vm, wal->count);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_vma;
	}

	err = wa_list_srm(rq, wal, vma);
	if (err)
		goto err_vma;

	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
		err = -ETIME;
		goto err_vma;
	}

	results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(results)) {
		err = PTR_ERR(results);
		goto err_vma;
	}

	err = 0;
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		if (!wa_verify(wa, results[i], wal->name, from))
			err = -ENXIO;

	i915_gem_object_unpin_map(vma->obj);

err_vma:
	i915_vma_unpin(vma);
	i915_vma_put(vma);
	return err;
}

int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
				    const char *from)
{
	return engine_wa_list_verify(engine->kernel_context,
				     &engine->wa_list,
				     from);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_workarounds.c"
#endif