/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "../i915_reset.h"

#include "../i915_selftest.h"
#include "igt_flush_test.h"
#include "igt_spinner.h"
#include "i915_random.h"

#include "mock_context.h"

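/*
 * live_sanitycheck - submit a spinning request on every engine, check that
 * it starts executing, then terminate it and flush. A basic smoke test of
 * the logical ring context (execlists) submission path.
 */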
static int live_sanitycheck(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	struct igt_spinner spin;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_CONTEXTS(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin, i915))
		goto err_unlock;

	ctx = kernel_context(i915);
	if (!ctx)
		goto err_spin;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin, rq)) {
			GEM_TRACE("spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx;
		}

		igt_spinner_end(&spin);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx;
		}
	}

	err = 0;
err_ctx:
	kernel_context_close(ctx);
err_spin:
	igt_spinner_fini(&spin);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

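/*
 * live_preempt - block each engine with a minimum priority spinner, then
 * submit a maximum priority spinner and check that it preempts the first
 * and starts executing.
 */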
static int live_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct igt_spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (igt_spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;
	ctx_hi->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;
	ctx_lo->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_lo, rq)) {
			GEM_TRACE("lo spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			igt_spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_hi, rq)) {
			GEM_TRACE("hi spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		igt_spinner_end(&spin_hi);
		igt_spinner_end(&spin_lo);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	igt_spinner_fini(&spin_lo);
err_spin_hi:
	igt_spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

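/*
 * live_late_preempt - submit both contexts at default priority and check
 * that the second does not overtake the first; then raise its priority via
 * engine->schedule() and check that it now preempts the still-spinning low
 * priority request.
 */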
static int live_late_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct igt_spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	struct i915_sched_attr attr = {};
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (igt_spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_lo, rq)) {
			pr_err("First context failed to start\n");
			goto err_wedged;
		}

		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
						MI_NOOP);
		if (IS_ERR(rq)) {
			igt_spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (igt_wait_for_spinner(&spin_hi, rq)) {
			pr_err("Second context overtook first?\n");
			goto err_wedged;
		}

		attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
		engine->schedule(rq, &attr);

		if (!igt_wait_for_spinner(&spin_hi, rq)) {
			pr_err("High priority context failed to preempt the low priority context\n");
			GEM_TRACE_DUMP();
			goto err_wedged;
		}

		igt_spinner_end(&spin_hi);
		igt_spinner_end(&spin_lo);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	igt_spinner_fini(&spin_lo);
err_spin_hi:
	igt_spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;

err_wedged:
	igt_spinner_end(&spin_hi);
	igt_spinner_end(&spin_lo);
	i915_gem_set_wedged(i915);
	err = -EIO;
	goto err_ctx_lo;
}

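/*
 * live_preempt_hang - use the execlists preempt_hang hooks to inject a hang
 * while the preemption is being processed, reset the engine, and check that
 * the high priority request still runs to completion afterwards. Only run
 * on engines that support preemption and per-engine reset.
 */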
static int live_preempt_hang(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct igt_spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	if (!intel_has_reset_engine(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (igt_spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;
	ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;
	ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		if (!intel_engine_has_preemption(engine))
			continue;

		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_lo, rq)) {
			GEM_TRACE("lo spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			igt_spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		init_completion(&engine->execlists.preempt_hang.completion);
		engine->execlists.preempt_hang.inject_hang = true;

		i915_request_add(rq);

		if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
						 HZ / 10)) {
			pr_err("Preemption did not occur within timeout!");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
		i915_reset_engine(engine, NULL);
		clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);

		engine->execlists.preempt_hang.inject_hang = false;

		if (!igt_wait_for_spinner(&spin_hi, rq)) {
			GEM_TRACE("hi spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		igt_spinner_end(&spin_hi);
		igt_spinner_end(&spin_lo);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	igt_spinner_fini(&spin_lo);
err_spin_hi:
	igt_spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

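/* Pick a value in the half-open range [min, max) using the selftest PRNG. */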
static int random_range(struct rnd_state *rnd, int min, int max)
{
	return i915_prandom_u32_max_state(max - min, rnd) + min;
}

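/* Pick a priority uniformly across the whole kernel priority range. */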
static int random_priority(struct rnd_state *rnd)
{
	return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
}

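/*
 * Shared state for the preemption smoke tests: a pool of kernel contexts to
 * submit from, an optional preemptible batch, and the per-thread engine and
 * request count used by smoke_crescendo().
 */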
struct preempt_smoke {
	struct drm_i915_private *i915;
	struct i915_gem_context **contexts;
	struct intel_engine_cs *engine;
	struct drm_i915_gem_object *batch;
	unsigned int ncontext;
	struct rnd_state prng;
	unsigned long count;
};

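/* Select one of the preallocated contexts at random. */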
static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
{
	return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
							  &smoke->prng)];
}

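/*
 * Bind the optional batch into the context's ppgtt, set the context priority
 * and submit a single request on smoke->engine, executing the batch if one
 * was supplied.
 */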
static int smoke_submit(struct preempt_smoke *smoke,
			struct i915_gem_context *ctx, int prio,
			struct drm_i915_gem_object *batch)
{
	struct i915_request *rq;
	struct i915_vma *vma = NULL;
	int err = 0;

	if (batch) {
		vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		err = i915_vma_pin(vma, 0, 0, PIN_USER);
		if (err)
			return err;
	}

	ctx->sched.priority = prio;

	rq = i915_request_alloc(smoke->engine, ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin;
	}

	if (vma) {
		err = rq->engine->emit_bb_start(rq,
						vma->node.start,
						PAGE_SIZE, 0);
		if (!err)
			err = i915_vma_move_to_active(vma, rq, 0);
	}

	i915_request_add(rq);

unpin:
	if (vma)
		i915_vma_unpin(vma);

	return err;
}

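/*
 * Per-engine worker for smoke_crescendo(): keep submitting requests from
 * random contexts, cycling the priority upwards with each submission, until
 * the selftest timeout expires.
 */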
static int smoke_crescendo_thread(void *arg)
{
	struct preempt_smoke *smoke = arg;
	IGT_TIMEOUT(end_time);
	unsigned long count;

	count = 0;
	do {
		struct i915_gem_context *ctx = smoke_context(smoke);
		int err;

		mutex_lock(&smoke->i915->drm.struct_mutex);
		err = smoke_submit(smoke,
				   ctx, count % I915_PRIORITY_MAX,
				   smoke->batch);
		mutex_unlock(&smoke->i915->drm.struct_mutex);
		if (err)
			return err;

		count++;
	} while (!__igt_timeout(end_time, NULL));

	smoke->count = count;
	return 0;
}

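/*
 * Spawn one submission thread per engine and let them flood the GPU with
 * requests of ever-increasing priority, forcing frequent preemption between
 * the contexts.
 */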
static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
#define BATCH BIT(0)
{
	struct task_struct *tsk[I915_NUM_ENGINES] = {};
	struct preempt_smoke arg[I915_NUM_ENGINES];
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned long count;
	int err = 0;

	mutex_unlock(&smoke->i915->drm.struct_mutex);

	for_each_engine(engine, smoke->i915, id) {
		arg[id] = *smoke;
		arg[id].engine = engine;
		if (!(flags & BATCH))
			arg[id].batch = NULL;
		arg[id].count = 0;

		tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
				      "igt/smoke:%d", id);
		if (IS_ERR(tsk[id])) {
			err = PTR_ERR(tsk[id]);
			break;
		}
		get_task_struct(tsk[id]);
	}

	count = 0;
	for_each_engine(engine, smoke->i915, id) {
		int status;

		if (IS_ERR_OR_NULL(tsk[id]))
			continue;

		status = kthread_stop(tsk[id]);
		if (status && !err)
			err = status;

		count += arg[id].count;

		put_task_struct(tsk[id]);
	}

	mutex_lock(&smoke->i915->drm.struct_mutex);

	pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
		count, flags,
		RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext);
	return err;
}

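/*
 * Single threaded variant: walk the engines submitting requests from random
 * contexts at random priorities until the selftest timeout expires.
 */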
static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
{
	enum intel_engine_id id;
	IGT_TIMEOUT(end_time);
	unsigned long count;

	count = 0;
	do {
		for_each_engine(smoke->engine, smoke->i915, id) {
			struct i915_gem_context *ctx = smoke_context(smoke);
			int err;

			err = smoke_submit(smoke,
					   ctx, random_priority(&smoke->prng),
					   flags & BATCH ? smoke->batch : NULL);
			if (err)
				return err;

			count++;
		}
	} while (!__igt_timeout(end_time, NULL));

	pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
		count, flags,
		RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext);
	return 0;
}

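/*
 * live_preempt_smoke - stress the preemption logic by submitting requests
 * across many contexts with varying priorities, in two phases: without a
 * batch and with a page of MI_ARB_CHECK as a preemptible batch.
 */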
static int live_preempt_smoke(void *arg)
{
	struct preempt_smoke smoke = {
		.i915 = arg,
		.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
		.ncontext = 1024,
	};
	const unsigned int phase[] = { 0, BATCH };
	intel_wakeref_t wakeref;
	int err = -ENOMEM;
	u32 *cs;
	int n;

	if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
		return 0;

	smoke.contexts = kmalloc_array(smoke.ncontext,
				       sizeof(*smoke.contexts),
				       GFP_KERNEL);
	if (!smoke.contexts)
		return -ENOMEM;

	mutex_lock(&smoke.i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(smoke.i915);

	smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
	if (IS_ERR(smoke.batch)) {
		err = PTR_ERR(smoke.batch);
		goto err_unlock;
	}

	cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_batch;
	}
	for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
		cs[n] = MI_ARB_CHECK;
	cs[n] = MI_BATCH_BUFFER_END;
	i915_gem_object_unpin_map(smoke.batch);

	err = i915_gem_object_set_to_gtt_domain(smoke.batch, false);
	if (err)
		goto err_batch;

	for (n = 0; n < smoke.ncontext; n++) {
		smoke.contexts[n] = kernel_context(smoke.i915);
		if (!smoke.contexts[n]) {
			err = -ENOMEM;
			goto err_ctx;
		}
	}

	for (n = 0; n < ARRAY_SIZE(phase); n++) {
		err = smoke_crescendo(&smoke, phase[n]);
		if (err)
			goto err_ctx;

		err = smoke_random(&smoke, phase[n]);
		if (err)
			goto err_ctx;
	}

err_ctx:
	if (igt_flush_test(smoke.i915, I915_WAIT_LOCKED))
		err = -EIO;

	for (n = 0; n < smoke.ncontext; n++) {
		if (!smoke.contexts[n])
			break;
		kernel_context_close(smoke.contexts[n]);
	}

err_batch:
	i915_gem_object_put(smoke.batch);
err_unlock:
	intel_runtime_pm_put(smoke.i915, wakeref);
	mutex_unlock(&smoke.i915->drm.struct_mutex);
	kfree(smoke.contexts);

	return err;
}

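/*
 * Entry point for the execlists live selftests. Skipped on hardware without
 * execlists support or if the GPU is already terminally wedged.
 */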
int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_sanitycheck),
		SUBTEST(live_preempt),
		SUBTEST(live_late_preempt),
		SUBTEST(live_preempt_hang),
		SUBTEST(live_preempt_smoke),
	};

	if (!HAS_EXECLISTS(i915))
		return 0;

	if (i915_terminally_wedged(&i915->gpu_error))
		return 0;

	return i915_subtests(tests, i915);
}