/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/prime_numbers.h>

#include "../i915_selftest.h"
#include "igt_live_test.h"

#include "mock_context.h"
#include "mock_gem_device.h"

static int igt_add_request(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_request *request;
	int err = -ENOMEM;

	/* Basic preliminary test to create a request and let it loose! */

	mutex_lock(&i915->drm.struct_mutex);
	request = mock_request(i915->engine[RCS],
			       i915->kernel_context,
			       HZ / 10);
	if (!request)
		goto out_unlock;

	i915_request_add(request);

	err = 0;
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int igt_wait_request(void *arg)
{
	const long T = HZ / 4;
	struct drm_i915_private *i915 = arg;
	struct i915_request *request;
	int err = -EINVAL;

	/* Submit a request, then wait upon it */

	mutex_lock(&i915->drm.struct_mutex);
	request = mock_request(i915->engine[RCS], i915->kernel_context, T);
	if (!request) {
		err = -ENOMEM;
		goto out_unlock;
	}

	if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
		pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
		goto out_unlock;
	}

	if (i915_request_wait(request, I915_WAIT_LOCKED, T) != -ETIME) {
		pr_err("request wait succeeded (expected timeout before submit!)\n");
		goto out_unlock;
	}

	if (i915_request_completed(request)) {
		pr_err("request completed before submit!!\n");
		goto out_unlock;
	}

	i915_request_add(request);

	if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
		pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
		goto out_unlock;
	}

	if (i915_request_completed(request)) {
		pr_err("request completed immediately!\n");
		goto out_unlock;
	}

	if (i915_request_wait(request, I915_WAIT_LOCKED, T / 2) != -ETIME) {
		pr_err("request wait succeeded (expected timeout!)\n");
		goto out_unlock;
	}

	if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
		pr_err("request wait timed out!\n");
		goto out_unlock;
	}

	if (!i915_request_completed(request)) {
		pr_err("request not complete after waiting!\n");
		goto out_unlock;
	}

	if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
		pr_err("request wait timed out when already complete!\n");
		goto out_unlock;
	}

	err = 0;
out_unlock:
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int igt_fence_wait(void *arg)
{
	const long T = HZ / 4;
	struct drm_i915_private *i915 = arg;
	struct i915_request *request;
	int err = -EINVAL;

	/* Submit a request, treat it as a fence and wait upon it */

	mutex_lock(&i915->drm.struct_mutex);
	request = mock_request(i915->engine[RCS], i915->kernel_context, T);
	if (!request) {
		err = -ENOMEM;
		goto out_locked;
	}
	mutex_unlock(&i915->drm.struct_mutex); /* safe as we are single user */

	if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
		pr_err("fence wait success before submit (expected timeout)!\n");
		goto out_device;
	}

	mutex_lock(&i915->drm.struct_mutex);
	i915_request_add(request);
	mutex_unlock(&i915->drm.struct_mutex);

	if (dma_fence_is_signaled(&request->fence)) {
		pr_err("fence signaled immediately!\n");
		goto out_device;
	}

	if (dma_fence_wait_timeout(&request->fence, false, T / 2) != -ETIME) {
		pr_err("fence wait success after submit (expected timeout)!\n");
		goto out_device;
	}

	if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
		pr_err("fence wait timed out (expected success)!\n");
		goto out_device;
	}

	if (!dma_fence_is_signaled(&request->fence)) {
		pr_err("fence unsignaled after waiting!\n");
		goto out_device;
	}

	if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
		pr_err("fence wait timed out when complete (expected success)!\n");
		goto out_device;
	}

	err = 0;
out_device:
	mutex_lock(&i915->drm.struct_mutex);
out_locked:
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int igt_request_rewind(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_request *request, *vip;
	struct i915_gem_context *ctx[2];
	int err = -EINVAL;
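
	/* Check that a queued, not-yet-executed request can be cancelled and
	 * resubmitted behind a later "vip" request, so that the vip request
	 * completes first (simulating preemption by manual reordering).
	 */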

	mutex_lock(&i915->drm.struct_mutex);
	ctx[0] = mock_context(i915, "A");
	request = mock_request(i915->engine[RCS], ctx[0], 2 * HZ);
	if (!request) {
		err = -ENOMEM;
		goto err_context_0;
	}

	i915_request_get(request);
	i915_request_add(request);

	ctx[1] = mock_context(i915, "B");
	vip = mock_request(i915->engine[RCS], ctx[1], 0);
	if (!vip) {
		err = -ENOMEM;
		goto err_context_1;
	}

	/* Simulate preemption by manual reordering */
	if (!mock_cancel_request(request)) {
		pr_err("failed to cancel request (already executed)!\n");
		i915_request_add(vip);
		goto err_context_1;
	}
	i915_request_get(vip);
	i915_request_add(vip);
	rcu_read_lock();
	request->engine->submit_request(request);
	rcu_read_unlock();

	mutex_unlock(&i915->drm.struct_mutex);

	if (i915_request_wait(vip, 0, HZ) == -ETIME) {
		pr_err("timed out waiting for high priority request, vip.seqno=%d, current seqno=%d\n",
		       vip->global_seqno, intel_engine_get_seqno(i915->engine[RCS]));
		goto err;
	}

	if (i915_request_completed(request)) {
		pr_err("low priority request already completed\n");
		goto err;
	}

	err = 0;
err:
	i915_request_put(vip);
	mutex_lock(&i915->drm.struct_mutex);
err_context_1:
	mock_context_close(ctx[1]);
	i915_request_put(request);
err_context_0:
	mock_context_close(ctx[0]);
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

int i915_request_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_add_request),
		SUBTEST(igt_wait_request),
		SUBTEST(igt_fence_wait),
		SUBTEST(igt_request_rewind),
	};
	struct drm_i915_private *i915;
	intel_wakeref_t wakeref;
	int err = 0;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	with_intel_runtime_pm(i915, wakeref)
		err = i915_subtests(tests, i915);

	drm_dev_put(&i915->drm);

	return err;
}

static int live_nop_request(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	struct igt_live_test t;
	unsigned int id;
	int err = -ENODEV;

	/* Submit various sized batches of empty requests, to each engine
	 * (individually), and wait for the batch to complete. We can check
	 * the overhead of submitting requests to the hardware.
	 */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	for_each_engine(engine, i915, id) {
		struct i915_request *request = NULL;
		unsigned long n, prime;
		IGT_TIMEOUT(end_time);
		ktime_t times[2] = {};

		err = igt_live_test_begin(&t, i915, __func__, engine->name);
		if (err)
			goto out_unlock;

		for_each_prime_number_from(prime, 1, 8192) {
			times[1] = ktime_get_raw();

			for (n = 0; n < prime; n++) {
				request = i915_request_alloc(engine,
							     i915->kernel_context);
				if (IS_ERR(request)) {
					err = PTR_ERR(request);
					goto out_unlock;
				}

				/* This space is left intentionally blank.
				 *
				 * We do not actually want to perform any
				 * action with this request, we just want
				 * to measure the latency in allocation
				 * and submission of our breadcrumbs -
				 * ensuring that the bare request is sufficient
				 * for the system to work (i.e. proper HEAD
				 * tracking of the rings, interrupt handling,
				 * etc). It also gives us the lowest bounds
				 * for latency.
				 */

				i915_request_add(request);
			}
			i915_request_wait(request,
					  I915_WAIT_LOCKED,
					  MAX_SCHEDULE_TIMEOUT);

			times[1] = ktime_sub(ktime_get_raw(), times[1]);
			if (prime == 1)
				times[0] = times[1];

			if (__igt_timeout(end_time, NULL))
				break;
		}

		err = igt_live_test_end(&t);
		if (err)
			goto out_unlock;

		pr_info("Request latencies on %s: 1 = %lluns, %lu = %lluns\n",
			engine->name,
			ktime_to_ns(times[0]),
			prime, div64_u64(ktime_to_ns(times[1]), prime));
	}

out_unlock:
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

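/*
 * Build a one-page batch containing only MI_BATCH_BUFFER_END, pinned into the
 * global GTT so that it can be submitted on any engine.
 */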
static struct i915_vma *empty_batch(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_chipset_flush(i915);

	i915_gem_object_unpin_map(obj);

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (err)
		goto err;

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_GLOBAL);
	if (err)
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

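/*
 * Submit the empty batch as a single request on @engine using the kernel
 * context, and return the request for the caller to wait upon.
 */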
static struct i915_request *
empty_request(struct intel_engine_cs *engine,
	      struct i915_vma *batch)
{
	struct i915_request *request;
	int err;

	request = i915_request_alloc(engine, engine->i915->kernel_context);
	if (IS_ERR(request))
		return request;

	err = engine->emit_bb_start(request,
				    batch->node.start,
				    batch->node.size,
				    I915_DISPATCH_SECURE);
	if (err)
		goto out_request;

out_request:
	i915_request_add(request);
	return err ? ERR_PTR(err) : request;
}

static int live_empty_request(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	struct igt_live_test t;
	struct i915_vma *batch;
	unsigned int id;
	int err = 0;

	/* Submit various sized batches of empty requests, to each engine
	 * (individually), and wait for the batch to complete. We can check
	 * the overhead of submitting requests to the hardware.
	 */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	batch = empty_batch(i915);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_unlock;
	}

	for_each_engine(engine, i915, id) {
		IGT_TIMEOUT(end_time);
		struct i915_request *request;
		unsigned long n, prime;
		ktime_t times[2] = {};

		err = igt_live_test_begin(&t, i915, __func__, engine->name);
		if (err)
			goto out_batch;

		/* Warmup / preload */
		request = empty_request(engine, batch);
		if (IS_ERR(request)) {
			err = PTR_ERR(request);
			goto out_batch;
		}
		i915_request_wait(request,
				  I915_WAIT_LOCKED,
				  MAX_SCHEDULE_TIMEOUT);

		for_each_prime_number_from(prime, 1, 8192) {
			times[1] = ktime_get_raw();

			for (n = 0; n < prime; n++) {
				request = empty_request(engine, batch);
				if (IS_ERR(request)) {
					err = PTR_ERR(request);
					goto out_batch;
				}
			}
			i915_request_wait(request,
					  I915_WAIT_LOCKED,
					  MAX_SCHEDULE_TIMEOUT);

			times[1] = ktime_sub(ktime_get_raw(), times[1]);
			if (prime == 1)
				times[0] = times[1];

			if (__igt_timeout(end_time, NULL))
				break;
		}

		err = igt_live_test_end(&t);
		if (err)
			goto out_batch;

		pr_info("Batch latencies on %s: 1 = %lluns, %lu = %lluns\n",
			engine->name,
			ktime_to_ns(times[0]),
			prime, div64_u64(ktime_to_ns(times[1]), prime));
	}

out_batch:
	i915_vma_unpin(batch);
	i915_vma_put(batch);
out_unlock:
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

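/*
 * Build a self-referencing batch: its MI_BATCH_BUFFER_START jumps back to the
 * start of the same buffer, so a request keeps running until the loop is
 * broken by rewriting the batch to MI_BATCH_BUFFER_END (see
 * recursive_batch_resolve()).
 */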
static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{
	struct i915_gem_context *ctx = i915->kernel_context;
	struct i915_address_space *vm =
		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
	struct drm_i915_gem_object *obj;
	const int gen = INTEL_GEN(i915);
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err;

	err = i915_gem_object_set_to_wc_domain(obj, true);
	if (err)
		goto err;

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	if (gen >= 8) {
		*cmd++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
		*cmd++ = lower_32_bits(vma->node.start);
		*cmd++ = upper_32_bits(vma->node.start);
	} else if (gen >= 6) {
		*cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
		*cmd++ = lower_32_bits(vma->node.start);
	} else {
		*cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
		*cmd++ = lower_32_bits(vma->node.start);
	}
	*cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
	i915_gem_chipset_flush(i915);

	i915_gem_object_unpin_map(obj);

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

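/* Terminate the self-referencing batch so that its requests can complete. */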
static int recursive_batch_resolve(struct i915_vma *batch)
{
	u32 *cmd;

	cmd = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_chipset_flush(batch->vm->i915);

	i915_gem_object_unpin_map(batch->obj);

	return 0;
}

static int live_all_engines(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_request *request[I915_NUM_ENGINES] = {};
	intel_wakeref_t wakeref;
	struct igt_live_test t;
	struct i915_vma *batch;
	unsigned int id;
	int err;

	/* Check we can submit requests to all engines simultaneously. We
	 * send a recursive batch to each engine - checking that we don't
	 * block doing so, and that they don't complete too soon.
	 */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	err = igt_live_test_begin(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	batch = recursive_batch(i915);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
		goto out_unlock;
	}

	for_each_engine(engine, i915, id) {
		request[id] = i915_request_alloc(engine, i915->kernel_context);
		if (IS_ERR(request[id])) {
			err = PTR_ERR(request[id]);
			pr_err("%s: Request allocation failed with err=%d\n",
			       __func__, err);
			goto out_request;
		}

		err = engine->emit_bb_start(request[id],
					    batch->node.start,
					    batch->node.size,
					    0);
		GEM_BUG_ON(err);
		request[id]->batch = batch;

		if (!i915_gem_object_has_active_reference(batch->obj)) {
			i915_gem_object_get(batch->obj);
			i915_gem_object_set_active_reference(batch->obj);
		}

		err = i915_vma_move_to_active(batch, request[id], 0);
		GEM_BUG_ON(err);

		i915_request_get(request[id]);
		i915_request_add(request[id]);
	}

	for_each_engine(engine, i915, id) {
		if (i915_request_completed(request[id])) {
			pr_err("%s(%s): request completed too early!\n",
			       __func__, engine->name);
			err = -EINVAL;
			goto out_request;
		}
	}

	err = recursive_batch_resolve(batch);
	if (err) {
		pr_err("%s: failed to resolve batch, err=%d\n", __func__, err);
		goto out_request;
	}

	for_each_engine(engine, i915, id) {
		long timeout;

		timeout = i915_request_wait(request[id],
					    I915_WAIT_LOCKED,
					    MAX_SCHEDULE_TIMEOUT);
		if (timeout < 0) {
			err = timeout;
			pr_err("%s: error waiting for request on %s, err=%d\n",
			       __func__, engine->name, err);
			goto out_request;
		}

		GEM_BUG_ON(!i915_request_completed(request[id]));
		i915_request_put(request[id]);
		request[id] = NULL;
	}

	err = igt_live_test_end(&t);

out_request:
	for_each_engine(engine, i915, id)
		if (request[id])
			i915_request_put(request[id]);
	i915_vma_unpin(batch);
	i915_vma_put(batch);
out_unlock:
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int live_sequential_engines(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_request *request[I915_NUM_ENGINES] = {};
	struct i915_request *prev = NULL;
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	struct igt_live_test t;
	unsigned int id;
	int err;

	/* Check we can submit requests to all engines sequentially, such
	 * that each successive request waits for the earlier ones. This
	 * tests that we don't execute requests out of order, even though
	 * they are running on independent engines.
	 */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	err = igt_live_test_begin(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	for_each_engine(engine, i915, id) {
		struct i915_vma *batch;

		batch = recursive_batch(i915);
		if (IS_ERR(batch)) {
			err = PTR_ERR(batch);
			pr_err("%s: Unable to create batch for %s, err=%d\n",
			       __func__, engine->name, err);
			goto out_unlock;
		}

		request[id] = i915_request_alloc(engine, i915->kernel_context);
		if (IS_ERR(request[id])) {
			err = PTR_ERR(request[id]);
			pr_err("%s: Request allocation failed for %s with err=%d\n",
			       __func__, engine->name, err);
			goto out_request;
		}

		if (prev) {
			err = i915_request_await_dma_fence(request[id],
							   &prev->fence);
			if (err) {
				i915_request_add(request[id]);
				pr_err("%s: Request await failed for %s with err=%d\n",
				       __func__, engine->name, err);
				goto out_request;
			}
		}

		err = engine->emit_bb_start(request[id],
					    batch->node.start,
					    batch->node.size,
					    0);
		GEM_BUG_ON(err);
		request[id]->batch = batch;

		err = i915_vma_move_to_active(batch, request[id], 0);
		GEM_BUG_ON(err);

		i915_gem_object_set_active_reference(batch->obj);
		i915_vma_get(batch);

		i915_request_get(request[id]);
		i915_request_add(request[id]);

		prev = request[id];
	}

	for_each_engine(engine, i915, id) {
		long timeout;

		if (i915_request_completed(request[id])) {
			pr_err("%s(%s): request completed too early!\n",
			       __func__, engine->name);
			err = -EINVAL;
			goto out_request;
		}

		err = recursive_batch_resolve(request[id]->batch);
		if (err) {
			pr_err("%s: failed to resolve batch, err=%d\n",
			       __func__, err);
			goto out_request;
		}

		timeout = i915_request_wait(request[id],
					    I915_WAIT_LOCKED,
					    MAX_SCHEDULE_TIMEOUT);
		if (timeout < 0) {
			err = timeout;
			pr_err("%s: error waiting for request on %s, err=%d\n",
			       __func__, engine->name, err);
			goto out_request;
		}

		GEM_BUG_ON(!i915_request_completed(request[id]));
	}

	err = igt_live_test_end(&t);

out_request:
	for_each_engine(engine, i915, id) {
		u32 *cmd;

		if (!request[id])
			break;

		cmd = i915_gem_object_pin_map(request[id]->batch->obj,
					      I915_MAP_WC);
		if (!IS_ERR(cmd)) {
			*cmd = MI_BATCH_BUFFER_END;
			i915_gem_chipset_flush(i915);

			i915_gem_object_unpin_map(request[id]->batch->obj);
		}

		i915_vma_put(request[id]->batch);
		i915_request_put(request[id]);
	}
out_unlock:
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

int i915_request_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_nop_request),
		SUBTEST(live_all_engines),
		SUBTEST(live_sequential_engines),
		SUBTEST(live_empty_request),
	};

	if (i915_terminally_wedged(&i915->gpu_error))
		return 0;

	return i915_subtests(tests, i915);
}