/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_selftest.h"
#include "intel_reset.h"

#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/mock_drm.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"

static const struct wo_register {
	enum intel_platform platform;
	u32 reg;
} wo_registers[] = {
	{ INTEL_GEMINILAKE, 0x731c }
};

struct wa_lists {
	struct i915_wa_list gt_wa_list;
	struct {
		struct i915_wa_list wa_list;
		struct i915_wa_list ctx_wa_list;
	} engine[I915_NUM_ENGINES];
};

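/* Submit the request and synchronously wait a short while for it to complete. */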
static int request_add_sync(struct i915_request *rq, int err)
{
	i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -EIO;
	i915_request_put(rq);

	return err;
}

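/* Submit the request and wait for the spinner payload to start executing. */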
static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
{
	int err = 0;

	i915_request_get(rq);
	i915_request_add(rq);
	if (spin && !igt_wait_for_spinner(spin, rq))
		err = -ETIMEDOUT;
	i915_request_put(rq);

	return err;
}

static void
reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	memset(lists, 0, sizeof(*lists));

	wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
	gt_init_workarounds(gt->i915, &lists->gt_wa_list);
	wa_init_finish(&lists->gt_wa_list);

	for_each_engine(engine, gt->i915, id) {
		struct i915_wa_list *wal = &lists->engine[id].wa_list;

		wa_init_start(wal, "REF", engine->name);
		engine_init_workarounds(engine, wal);
		wa_init_finish(wal);

		__intel_engine_init_ctx_wa(engine,
					   &lists->engine[id].ctx_wa_list,
					   "CTX_REF");
	}
}

static void
reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt->i915, id)
		intel_wa_list_free(&lists->engine[id].wa_list);

	intel_wa_list_free(&lists->gt_wa_list);
}

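/*
 * Build and submit a request that uses MI_STORE_REGISTER_MEM to copy every
 * RING_FORCE_TO_NONPRIV slot of @engine into a freshly allocated internal
 * object, which is returned for the caller to inspect.
 */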
static struct drm_i915_gem_object *
read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
	const u32 base = engine->mmio_base;
	struct drm_i915_gem_object *result;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 srm, *cs;
	int err;
	int i;

	result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(result))
		return result;

	i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);

	cs = i915_gem_object_pin_map(result, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_obj;
	}
	memset(cs, 0xc5, PAGE_SIZE);
	i915_gem_object_flush_map(result);
	i915_gem_object_unpin_map(result);

	vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_obj;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_pin;
	}

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (INTEL_GEN(ctx->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		*cs++ = srm;
		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	i915_request_add(rq);
	i915_vma_unpin(vma);

	return result;

err_req:
	i915_request_add(rq);
err_pin:
	i915_vma_unpin(vma);
err_obj:
	i915_gem_object_put(result);
	return ERR_PTR(err);
}

static u32
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
	i915_reg_t reg = i < engine->whitelist.count ?
			 engine->whitelist.list[i].reg :
			 RING_NOPID(engine->mmio_base);

	return i915_mmio_reg_offset(reg);
}

static void
print_results(const struct intel_engine_cs *engine, const u32 *results)
{
	unsigned int i;

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = results[i];

		pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
			i, expected, actual);
	}
}

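/*
 * Compare the RING_NONPRIV slots captured by read_nonprivs() against the
 * whitelist we expect @engine to be using.
 */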
static int check_whitelist(struct i915_gem_context *ctx,
			   struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *results;
	struct intel_wedge_me wedge;
	u32 *vaddr;
	int err;
	int i;

	results = read_nonprivs(ctx, engine);
	if (IS_ERR(results))
		return PTR_ERR(results);

	err = 0;
	i915_gem_object_lock(results);
	intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
		err = i915_gem_object_set_to_cpu_domain(results, false);
	i915_gem_object_unlock(results);
	if (intel_gt_is_wedged(engine->gt))
		err = -EIO;
	if (err)
		goto out_put;

	vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = vaddr[i];

		if (expected != actual) {
			print_results(engine, vaddr);
			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
			       i, expected, actual);

			err = -EINVAL;
			break;
		}
	}

	i915_gem_object_unpin_map(results);
out_put:
	i915_gem_object_put(results);
	return err;
}

static int do_device_reset(struct intel_engine_cs *engine)
{
	intel_gt_reset(engine->gt, engine->mask, "live_workarounds");
	return 0;
}

static int do_engine_reset(struct intel_engine_cs *engine)
{
	return intel_engine_reset(engine, "live_workarounds");
}

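/*
 * Keep @engine busy with a spinner running in a throwaway kernel context, so
 * that a subsequent reset is applied while a different context is active.
 */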
static int
switch_to_scratch_context(struct intel_engine_cs *engine,
			  struct igt_spinner *spin)
{
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct i915_request *rq;
	int err = 0;

	ctx = kernel_context(engine->i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	GEM_BUG_ON(i915_gem_context_is_bannable(ctx));

	ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
	GEM_BUG_ON(IS_ERR(ce));

	rq = igt_spinner_create_request(spin, ce, MI_NOOP);

	intel_context_put(ce);

	if (IS_ERR(rq)) {
		spin = NULL;
		err = PTR_ERR(rq);
		goto err;
	}

	err = request_add_spin(rq, spin);
err:
	if (err && spin)
		igt_spinner_end(spin);

	kernel_context_close(ctx);
	return err;
}

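/*
 * Verify that the RING_NONPRIV whitelist survives the given reset: check it
 * before the reset, in the original context afterwards, and finally in a
 * freshly created context.
 */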
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
					int (*reset)(struct intel_engine_cs *),
					const char *name)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_gem_context *ctx, *tmp;
	struct igt_spinner spin;
	intel_wakeref_t wakeref;
	int err;

	pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
		engine->whitelist.count, engine->name, name);

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	err = igt_spinner_init(&spin, engine->gt);
	if (err)
		goto out_ctx;

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Invalid whitelist *before* %s reset!\n", name);
		goto out_spin;
	}

	err = switch_to_scratch_context(engine, &spin);
	if (err)
		goto out_spin;

	with_intel_runtime_pm(engine->uncore->rpm, wakeref)
		err = reset(engine);

	igt_spinner_end(&spin);

	if (err) {
		pr_err("%s reset failed\n", name);
		goto out_spin;
	}

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Whitelist not preserved in context across %s reset!\n",
		       name);
		goto out_spin;
	}

	tmp = kernel_context(i915);
	if (IS_ERR(tmp)) {
		err = PTR_ERR(tmp);
		goto out_spin;
	}
	kernel_context_close(ctx);
	ctx = tmp;

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
		       name);
		goto out_spin;
	}

out_spin:
	igt_spinner_fini(&spin);
out_ctx:
	kernel_context_close(ctx);
	return err;
}

static struct i915_vma *create_batch(struct i915_gem_context *ctx)
{
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_internal(ctx->i915, 16 * PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vm = i915_gem_context_get_vm_rcu(ctx);
	vma = i915_vma_instance(obj, vm, NULL);
	i915_vm_put(vm);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

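/*
 * Predict the value read back after writing @new over @old: an @rsvd mask of
 * 0x0000ffff denotes a masked register (the upper half of @new selects which
 * low bits are updated), otherwise @rsvd is the set of writable bits.
 */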
static u32 reg_write(u32 old, u32 new, u32 rsvd)
{
	if (rsvd == 0x0000ffff) {
		old &= ~(new >> 16);
		old |= new & (new >> 16);
	} else {
		old &= ~rsvd;
		old |= new & rsvd;
	}

	return old;
}

static bool wo_register(struct intel_engine_cs *engine, u32 reg)
{
	enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
	int i;

	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	     RING_FORCE_TO_NONPRIV_ACCESS_WR)
		return true;

	for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
		if (wo_registers[i].platform == platform &&
		    wo_registers[i].reg == reg)
			return true;
	}

	return false;
}

static bool ro_register(u32 reg)
{
	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	     RING_FORCE_TO_NONPRIV_ACCESS_RD)
		return true;

	return false;
}

static int whitelist_writable_count(struct intel_engine_cs *engine)
{
	int count = engine->whitelist.count;
	int i;

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			count--;
	}

	return count;
}

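/*
 * For each whitelisted register, write a series of garbage values from a
 * user batch, read the results back with SRM and check that they match the
 * predictions of reg_write(). Write-only registers are skipped and read-only
 * registers are expected to retain their original value.
 */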
static int check_dirty_whitelist(struct i915_gem_context *ctx,
				 struct intel_engine_cs *engine)
{
	const u32 values[] = {
		0x00000000,
		0x01010101,
		0x10100101,
		0x03030303,
		0x30300303,
		0x05050505,
		0x50500505,
		0x0f0f0f0f,
		0xf00ff00f,
		0x10101010,
		0xf0f01010,
		0x30303030,
		0xa0a03030,
		0x50505050,
		0xc0c05050,
		0xf0f0f0f0,
		0x11111111,
		0x33333333,
		0x55555555,
		0x0000ffff,
		0x00ff00ff,
		0xff0000ff,
		0xffff00ff,
		0xffffffff,
	};
	struct i915_address_space *vm;
	struct i915_vma *scratch;
	struct i915_vma *batch;
	int err = 0, i, v;
	u32 *cs, *results;

	vm = i915_gem_context_get_vm_rcu(ctx);
	scratch = create_scratch(vm, 2 * ARRAY_SIZE(values) + 1);
	i915_vm_put(vm);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	batch = create_batch(ctx);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_scratch;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
		u64 addr = scratch->node.start;
		struct i915_request *rq;
		u32 srm, lrm, rsvd;
		u32 expect;
		int idx;
		bool ro_reg;

		if (wo_register(engine, reg))
			continue;

		ro_reg = ro_register(reg);

		srm = MI_STORE_REGISTER_MEM;
		lrm = MI_LOAD_REGISTER_MEM;
		if (INTEL_GEN(ctx->i915) >= 8)
			lrm++, srm++;

		pr_debug("%s: Writing garbage to %x\n",
			 engine->name, reg);

		cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			goto out_batch;
		}

		/* SRM original */
		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = ~values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		GEM_BUG_ON(idx * sizeof(u32) > scratch->size);

		/* LRM original -- don't leave garbage in the context! */
		*cs++ = lrm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		*cs++ = MI_BATCH_BUFFER_END;

		i915_gem_object_flush_map(batch->obj);
		i915_gem_object_unpin_map(batch->obj);
		intel_gt_chipset_flush(engine->gt);

		rq = igt_request_alloc(ctx, engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_batch;
		}

		if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
			err = engine->emit_init_breadcrumb(rq);
			if (err)
				goto err_request;
		}

		i915_vma_lock(batch);
		err = i915_request_await_object(rq, batch->obj, false);
		if (err == 0)
			err = i915_vma_move_to_active(batch, rq, 0);
		i915_vma_unlock(batch);
		if (err)
			goto err_request;

		err = engine->emit_bb_start(rq,
					    batch->node.start, PAGE_SIZE,
					    0);
		if (err)
			goto err_request;

err_request:
		err = request_add_sync(rq, err);
		if (err) {
			pr_err("%s: Futzing %x timed out; cancelling test\n",
			       engine->name, reg);
			intel_gt_set_wedged(engine->gt);
			goto out_batch;
		}

		results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
		if (IS_ERR(results)) {
			err = PTR_ERR(results);
			goto out_batch;
		}

		GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
		if (!ro_reg) {
			/* detect write masking */
			rsvd = results[ARRAY_SIZE(values)];
			if (!rsvd) {
				pr_err("%s: Unable to write to whitelisted register %x\n",
				       engine->name, reg);
				err = -EINVAL;
				goto out_unpin;
			}
		}

		expect = results[0];
		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, ~values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		if (err) {
			pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n",
			       engine->name, err, reg);

			if (ro_reg)
				pr_info("%s: Whitelisted read-only register: %x, original value %08x\n",
					engine->name, reg, results[0]);
			else
				pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
					engine->name, reg, results[0], rsvd);

			expect = results[0];
			idx = 1;
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = ~values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}

			err = -EINVAL;
		}
out_unpin:
		i915_gem_object_unpin_map(scratch->obj);
		if (err)
			break;
	}

	if (igt_flush_test(ctx->i915))
		err = -EIO;
out_batch:
	i915_vma_unpin_and_release(&batch, 0);
out_scratch:
	i915_vma_unpin_and_release(&scratch, 0);
	return err;
}

static int live_dirty_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	struct drm_file *file;
	int err = 0;

	/* Can the user write to the whitelisted registers? */

	if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
		return 0;

	file = mock_file(gt->i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(gt->i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	for_each_engine(engine, gt->i915, id) {
		if (engine->whitelist.count == 0)
			continue;

		err = check_dirty_whitelist(ctx, engine);
		if (err)
			goto out_file;
	}

out_file:
	mock_file_free(gt->i915, file);
	return err;
}

static int live_reset_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/* If we reset the gpu, we should not lose the RING_NONPRIV */
	igt_global_reset_lock(gt);

	for_each_engine(engine, gt->i915, id) {
		if (engine->whitelist.count == 0)
			continue;

		if (intel_has_reset_engine(gt)) {
			err = check_whitelist_across_reset(engine,
							   do_engine_reset,
							   "engine");
			if (err)
				goto out;
		}

		if (intel_has_gpu_reset(gt)) {
			err = check_whitelist_across_reset(engine,
							   do_device_reset,
							   "device");
			if (err)
				goto out;
		}
	}

out:
	igt_global_reset_unlock(gt);
	return err;
}

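/*
 * Use MI_STORE_REGISTER_MEM to snapshot the current value of every
 * whitelisted register on @engine into @results.
 */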
static int read_whitelisted_registers(struct i915_gem_context *ctx,
				      struct intel_engine_cs *engine,
				      struct i915_vma *results)
{
	struct i915_request *rq;
	int i, err = 0;
	u32 srm, *cs;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_vma_lock(results);
	err = i915_request_await_object(rq, results->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(results);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM;
	if (INTEL_GEN(ctx->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u64 offset = results->node.start + sizeof(u32) * i;
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		/* Clear access permission field */
		reg &= ~RING_FORCE_TO_NONPRIV_ACCESS_MASK;

		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(offset);
		*cs++ = upper_32_bits(offset);
	}
	intel_ring_advance(rq, cs);

err_req:
	return request_add_sync(rq, err);
}

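/*
 * From an unprivileged user batch, write 0xffffffff to every writable
 * whitelisted register on @engine.
 */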
static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
				       struct intel_engine_cs *engine)
{
	struct i915_request *rq;
	struct i915_vma *batch;
	int i, err = 0;
	u32 *cs;

	batch = create_batch(ctx);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_batch;
	}

	*cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			continue;

		*cs++ = reg;
		*cs++ = 0xffffffff;
	}
	*cs++ = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(batch->obj);
	intel_gt_chipset_flush(engine->gt);

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto err_request;
	}

	i915_vma_lock(batch);
	err = i915_request_await_object(rq, batch->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);
	if (err)
		goto err_request;

	/* Perform the writes from an unprivileged "user" batch */
	err = engine->emit_bb_start(rq, batch->node.start, 0, 0);

err_request:
	err = request_add_sync(rq, err);

err_unpin:
	i915_gem_object_unpin_map(batch->obj);
err_batch:
	i915_vma_unpin_and_release(&batch, 0);
	return err;
}

struct regmask {
	i915_reg_t reg;
	unsigned long gen_mask;
};

static bool find_reg(struct drm_i915_private *i915,
		     i915_reg_t reg,
		     const struct regmask *tbl,
		     unsigned long count)
{
	u32 offset = i915_mmio_reg_offset(reg);

	while (count--) {
		if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask &&
		    i915_mmio_reg_offset(tbl->reg) == offset)
			return true;
		tbl++;
	}

	return false;
}

static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Alas, we must pardon some whitelists. Mistakes already made */
	static const struct regmask pardon[] = {
		{ GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
		{ GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
}

static bool result_eq(struct intel_engine_cs *engine,
		      u32 a, u32 b, i915_reg_t reg)
{
	if (a != b && !pardon_reg(engine->i915, reg)) {
		pr_err("Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
		       i915_mmio_reg_offset(reg), a, b);
		return false;
	}

	return true;
}

static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Some registers do not seem to behave and our writes are unreadable */
	static const struct regmask wo[] = {
		{ GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
}

static bool result_neq(struct intel_engine_cs *engine,
		       u32 a, u32 b, i915_reg_t reg)
{
	if (a == b && !writeonly_reg(engine->i915, reg)) {
		pr_err("Whitelist register 0x%4x:%08x was unwritable\n",
		       i915_mmio_reg_offset(reg), a);
		return false;
	}

	return true;
}

static int
check_whitelisted_registers(struct intel_engine_cs *engine,
			    struct i915_vma *A,
			    struct i915_vma *B,
			    bool (*fn)(struct intel_engine_cs *engine,
				       u32 a, u32 b,
				       i915_reg_t reg))
{
	u32 *a, *b;
	int i, err;

	a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
	if (IS_ERR(a))
		return PTR_ERR(a);

	b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
	if (IS_ERR(b)) {
		err = PTR_ERR(b);
		goto err_a;
	}

	err = 0;
	for (i = 0; i < engine->whitelist.count; i++) {
		const struct i915_wa *wa = &engine->whitelist.list[i];

		if (i915_mmio_reg_offset(wa->reg) &
		    RING_FORCE_TO_NONPRIV_ACCESS_RD)
			continue;

		if (!fn(engine, a[i], b[i], wa->reg))
			err = -EINVAL;
	}

	i915_gem_object_unpin_map(B->obj);
err_a:
	i915_gem_object_unpin_map(A->obj);
	return err;
}

static int live_isolated_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct {
		struct i915_gem_context *ctx;
		struct i915_vma *scratch[2];
	} client[2] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, err = 0;

	/*
	 * Check that a write into a whitelist register works, but is
	 * invisible to a second context.
	 */

	if (!intel_engines_has_context_isolation(gt->i915))
		return 0;

	for (i = 0; i < ARRAY_SIZE(client); i++) {
		struct i915_address_space *vm;
		struct i915_gem_context *c;

		c = kernel_context(gt->i915);
		if (IS_ERR(c)) {
			err = PTR_ERR(c);
			goto err;
		}

		vm = i915_gem_context_get_vm_rcu(c);

		client[i].scratch[0] = create_scratch(vm, 1024);
		if (IS_ERR(client[i].scratch[0])) {
			err = PTR_ERR(client[i].scratch[0]);
			i915_vm_put(vm);
			kernel_context_close(c);
			goto err;
		}

		client[i].scratch[1] = create_scratch(vm, 1024);
		if (IS_ERR(client[i].scratch[1])) {
			err = PTR_ERR(client[i].scratch[1]);
			i915_vma_unpin_and_release(&client[i].scratch[0], 0);
			i915_vm_put(vm);
			kernel_context_close(c);
			goto err;
		}

		client[i].ctx = c;
		i915_vm_put(vm);
	}

	for_each_engine(engine, gt->i915, id) {
		if (!engine->kernel_context->vm)
			continue;

		if (!whitelist_writable_count(engine))
			continue;

		/* Read default values */
		err = read_whitelisted_registers(client[0].ctx, engine,
						 client[0].scratch[0]);
		if (err)
			goto err;

		/* Try to overwrite registers (should only affect ctx0) */
		err = scrub_whitelisted_registers(client[0].ctx, engine);
		if (err)
			goto err;

		/* Read values from ctx1, we expect these to be defaults */
		err = read_whitelisted_registers(client[1].ctx, engine,
						 client[1].scratch[0]);
		if (err)
			goto err;

		/* Verify that both reads return the same default values */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[1].scratch[0],
						  result_eq);
		if (err)
			goto err;

		/* Read back the updated values in ctx0 */
		err = read_whitelisted_registers(client[0].ctx, engine,
						 client[0].scratch[1]);
		if (err)
			goto err;

		/* User should be granted privilege to overwrite regs */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[0].scratch[1],
						  result_neq);
		if (err)
			goto err;
	}

err:
	for (i = 0; i < ARRAY_SIZE(client); i++) {
		if (!client[i].ctx)
			break;

		i915_vma_unpin_and_release(&client[i].scratch[1], 0);
		i915_vma_unpin_and_release(&client[i].scratch[0], 0);
		kernel_context_close(client[i].ctx);
	}

	if (igt_flush_test(gt->i915))
		err = -EIO;

	return err;
}

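/*
 * Check that the GT, engine and context workaround lists saved in @lists
 * still match what the hardware currently reports.
 */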
static bool
verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
		const char *str)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	bool ok = true;

	ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);

	for_each_gem_engine(ce, i915_gem_context_engines(ctx), it) {
		enum intel_engine_id id = ce->engine->id;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].wa_list,
					    str) == 0;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].ctx_wa_list,
					    str) == 0;
	}

	return ok;
}

static int
live_gpu_reset_workarounds(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gem_context *ctx;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	bool ok;

	if (!intel_has_gpu_reset(gt))
		return 0;

	ctx = kernel_context(gt->i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	i915_gem_context_lock_engines(ctx);

	pr_info("Verifying after GPU reset...\n");

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reference_lists_init(gt, &lists);

	ok = verify_wa_lists(ctx, &lists, "before reset");
	if (!ok)
		goto out;

	intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");

	ok = verify_wa_lists(ctx, &lists, "after reset");

out:
	i915_gem_context_unlock_engines(ctx);
	kernel_context_close(ctx);
	reference_lists_fini(gt, &lists);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);

	return ok ? 0 : -ESRCH;
}

static int
live_engine_reset_workarounds(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gem_engines_iter it;
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct igt_spinner spin;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	int ret = 0;

	if (!intel_has_reset_engine(gt))
		return 0;

	ctx = kernel_context(gt->i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reference_lists_init(gt, &lists);

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		struct intel_engine_cs *engine = ce->engine;
		bool ok;

		pr_info("Verifying after %s reset...\n", engine->name);

		ok = verify_wa_lists(ctx, &lists, "before reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		intel_engine_reset(engine, "live_workarounds");

		ok = verify_wa_lists(ctx, &lists, "after idle reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		ret = igt_spinner_init(&spin, engine->gt);
		if (ret)
			goto err;

		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			igt_spinner_fini(&spin);
			goto err;
		}

		ret = request_add_spin(rq, &spin);
		if (ret) {
			pr_err("Spinner failed to start\n");
			igt_spinner_fini(&spin);
			goto err;
		}

		intel_engine_reset(engine, "live_workarounds");

		igt_spinner_end(&spin);
		igt_spinner_fini(&spin);

		ok = verify_wa_lists(ctx, &lists, "after busy reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}
	}
err:
	i915_gem_context_unlock_engines(ctx);
	reference_lists_fini(gt, &lists);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);
	kernel_context_close(ctx);

	igt_flush_test(gt->i915);

	return ret;
}

int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_dirty_whitelist),
		SUBTEST(live_reset_whitelist),
		SUBTEST(live_isolated_whitelist),
		SUBTEST(live_gpu_reset_workarounds),
		SUBTEST(live_engine_reset_workarounds),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return intel_gt_live_subtests(tests, &i915->gt);
}