/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/prime_numbers.h>

#include "../i915_selftest.h"

#include "mock_context.h"
#include "mock_gem_device.h"

static int igt_add_request(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_request *request;
	int err = -ENOMEM;

	/* Basic preliminary test to create a request and let it loose! */

	mutex_lock(&i915->drm.struct_mutex);
	request = mock_request(i915->engine[RCS],
			       i915->kernel_context,
			       HZ / 10);
	if (!request)
		goto out_unlock;

	i915_request_add(request);

	err = 0;
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int igt_wait_request(void *arg)
{
	const long T = HZ / 4;
	struct drm_i915_private *i915 = arg;
	struct i915_request *request;
	int err = -EINVAL;

	/* Submit a request, then wait upon it */

	mutex_lock(&i915->drm.struct_mutex);
	request = mock_request(i915->engine[RCS], i915->kernel_context, T);
	if (!request) {
		err = -ENOMEM;
		goto out_unlock;
	}

	if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
		pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
		goto out_unlock;
	}

	if (i915_request_wait(request, I915_WAIT_LOCKED, T) != -ETIME) {
		pr_err("request wait succeeded (expected timeout before submit!)\n");
		goto out_unlock;
	}

	if (i915_request_completed(request)) {
		pr_err("request completed before submit!!\n");
		goto out_unlock;
	}

	i915_request_add(request);

	if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
		pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
		goto out_unlock;
	}

	if (i915_request_completed(request)) {
		pr_err("request completed immediately!\n");
		goto out_unlock;
	}

	if (i915_request_wait(request, I915_WAIT_LOCKED, T / 2) != -ETIME) {
		pr_err("request wait succeeded (expected timeout!)\n");
		goto out_unlock;
	}

	if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
		pr_err("request wait timed out!\n");
		goto out_unlock;
	}

	if (!i915_request_completed(request)) {
		pr_err("request not complete after waiting!\n");
		goto out_unlock;
	}

	if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
		pr_err("request wait timed out when already complete!\n");
		goto out_unlock;
	}

	err = 0;
out_unlock:
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int igt_fence_wait(void *arg)
{
	const long T = HZ / 4;
	struct drm_i915_private *i915 = arg;
	struct i915_request *request;
	int err = -EINVAL;

	/* Submit a request, treat it as a fence and wait upon it */

	mutex_lock(&i915->drm.struct_mutex);
	request = mock_request(i915->engine[RCS], i915->kernel_context, T);
	if (!request) {
		err = -ENOMEM;
		goto out_locked;
	}
	mutex_unlock(&i915->drm.struct_mutex); /* safe as we are single user */

	if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
		pr_err("fence wait success before submit (expected timeout)!\n");
		goto out_device;
	}

	mutex_lock(&i915->drm.struct_mutex);
	i915_request_add(request);
	mutex_unlock(&i915->drm.struct_mutex);

	if (dma_fence_is_signaled(&request->fence)) {
		pr_err("fence signaled immediately!\n");
		goto out_device;
	}

	if (dma_fence_wait_timeout(&request->fence, false, T / 2) != -ETIME) {
		pr_err("fence wait success after submit (expected timeout)!\n");
		goto out_device;
	}

	if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
		pr_err("fence wait timed out (expected success)!\n");
		goto out_device;
	}

	if (!dma_fence_is_signaled(&request->fence)) {
		pr_err("fence unsignaled after waiting!\n");
		goto out_device;
	}

	if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
		pr_err("fence wait timed out when complete (expected success)!\n");
		goto out_device;
	}

	err = 0;
out_device:
	mutex_lock(&i915->drm.struct_mutex);
out_locked:
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

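/*
 * Queue a slow request, then reorder a second "vip" request ahead of it by
 * cancelling and resubmitting the first, and check that the vip request
 * completes while the original request is still outstanding.
 */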
static int igt_request_rewind(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_request *request, *vip;
	struct i915_gem_context *ctx[2];
	int err = -EINVAL;

	mutex_lock(&i915->drm.struct_mutex);
	ctx[0] = mock_context(i915, "A");
	request = mock_request(i915->engine[RCS], ctx[0], 2 * HZ);
	if (!request) {
		err = -ENOMEM;
		goto err_context_0;
	}

	i915_request_get(request);
	i915_request_add(request);

	ctx[1] = mock_context(i915, "B");
	vip = mock_request(i915->engine[RCS], ctx[1], 0);
	if (!vip) {
		err = -ENOMEM;
		goto err_context_1;
	}

	/* Simulate preemption by manual reordering */
	if (!mock_cancel_request(request)) {
		pr_err("failed to cancel request (already executed)!\n");
		i915_request_add(vip);
		goto err_context_1;
	}
	i915_request_get(vip);
	i915_request_add(vip);
	rcu_read_lock();
	request->engine->submit_request(request);
	rcu_read_unlock();

	mutex_unlock(&i915->drm.struct_mutex);

	if (i915_request_wait(vip, 0, HZ) == -ETIME) {
		pr_err("timed out waiting for high priority request, vip.seqno=%d, current seqno=%d\n",
		       vip->global_seqno, intel_engine_get_seqno(i915->engine[RCS]));
		goto err;
	}

	if (i915_request_completed(request)) {
		pr_err("low priority request already completed\n");
		goto err;
	}

	err = 0;
err:
	i915_request_put(vip);
	mutex_lock(&i915->drm.struct_mutex);
err_context_1:
	mock_context_close(ctx[1]);
	i915_request_put(request);
err_context_0:
	mock_context_close(ctx[0]);
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

int i915_request_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_add_request),
		SUBTEST(igt_wait_request),
		SUBTEST(igt_fence_wait),
		SUBTEST(igt_request_rewind),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	err = i915_subtests(tests, i915);
	drm_dev_put(&i915->drm);

	return err;
}

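/*
 * live_test records the GPU reset count and clears the missed-interrupt mask
 * before a subtest runs, so that end_live_test() can verify the subtest left
 * the engines idle without triggering any resets or missing any interrupts.
 */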
struct live_test {
	struct drm_i915_private *i915;
	const char *func;
	const char *name;

	unsigned int reset_count;
};

static int begin_live_test(struct live_test *t,
			   struct drm_i915_private *i915,
			   const char *func,
			   const char *name)
{
	int err;

	t->i915 = i915;
	t->func = func;
	t->name = name;

	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_LOCKED,
				     MAX_SCHEDULE_TIMEOUT);
	if (err) {
		pr_err("%s(%s): failed to idle before, with err=%d!",
		       func, name, err);
		return err;
	}

	i915->gpu_error.missed_irq_rings = 0;
	t->reset_count = i915_reset_count(&i915->gpu_error);

	return 0;
}

static int end_live_test(struct live_test *t)
{
	struct drm_i915_private *i915 = t->i915;

	i915_retire_requests(i915);

	if (wait_for(intel_engines_are_idle(i915), 10)) {
		pr_err("%s(%s): GPU not idle\n", t->func, t->name);
		return -EIO;
	}

	if (t->reset_count != i915_reset_count(&i915->gpu_error)) {
		pr_err("%s(%s): GPU was reset %d times!\n",
		       t->func, t->name,
		       i915_reset_count(&i915->gpu_error) - t->reset_count);
		return -EIO;
	}

	if (i915->gpu_error.missed_irq_rings) {
		pr_err("%s(%s): Missed interrupts on engines %lx\n",
		       t->func, t->name, i915->gpu_error.missed_irq_rings);
		return -EIO;
	}

	return 0;
}

static int live_nop_request(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct live_test t;
	unsigned int id;
	int err = -ENODEV;

	/* Submit various sized batches of empty requests, to each engine
	 * (individually), and wait for the batch to complete. We can check
	 * the overhead of submitting requests to the hardware.
	 */

	mutex_lock(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);

	for_each_engine(engine, i915, id) {
		struct i915_request *request = NULL;
		unsigned long n, prime;
		IGT_TIMEOUT(end_time);
		ktime_t times[2] = {};

		err = begin_live_test(&t, i915, __func__, engine->name);
		if (err)
			goto out_unlock;

		for_each_prime_number_from(prime, 1, 8192) {
			times[1] = ktime_get_raw();

			for (n = 0; n < prime; n++) {
				request = i915_request_alloc(engine,
							     i915->kernel_context);
				if (IS_ERR(request)) {
					err = PTR_ERR(request);
					goto out_unlock;
				}

				/* This space is left intentionally blank.
				 *
				 * We do not actually want to perform any
				 * action with this request, we just want
				 * to measure the latency in allocation
				 * and submission of our breadcrumbs -
				 * ensuring that the bare request is sufficient
				 * for the system to work (i.e. proper HEAD
				 * tracking of the rings, interrupt handling,
				 * etc). It also gives us the lowest bounds
				 * for latency.
				 */

				i915_request_add(request);
			}
			i915_request_wait(request,
					  I915_WAIT_LOCKED,
					  MAX_SCHEDULE_TIMEOUT);

			times[1] = ktime_sub(ktime_get_raw(), times[1]);
			if (prime == 1)
				times[0] = times[1];

			if (__igt_timeout(end_time, NULL))
				break;
		}

		err = end_live_test(&t);
		if (err)
			goto out_unlock;

		pr_info("Request latencies on %s: 1 = %lluns, %lu = %lluns\n",
			engine->name,
			ktime_to_ns(times[0]),
			prime, div64_u64(ktime_to_ns(times[1]), prime));
	}

out_unlock:
	intel_runtime_pm_put_unchecked(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

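/*
 * Build a one-page batch containing only MI_BATCH_BUFFER_END, pinned into the
 * global GTT, so that each request carries the smallest possible payload.
 */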
static struct i915_vma *empty_batch(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_chipset_flush(i915);

	i915_gem_object_unpin_map(obj);

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (err)
		goto err;

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_GLOBAL);
	if (err)
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

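/* Submit the no-op batch as a single request on @engine and return it. */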
static struct i915_request *
empty_request(struct intel_engine_cs *engine,
	      struct i915_vma *batch)
{
	struct i915_request *request;
	int err;

	request = i915_request_alloc(engine, engine->i915->kernel_context);
	if (IS_ERR(request))
		return request;

	err = engine->emit_bb_start(request,
				    batch->node.start,
				    batch->node.size,
				    I915_DISPATCH_SECURE);
	if (err)
		goto out_request;

out_request:
	i915_request_add(request);
	return err ? ERR_PTR(err) : request;
}

static int live_empty_request(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct live_test t;
	struct i915_vma *batch;
	unsigned int id;
	int err = 0;

	/* Submit various sized batches of empty requests, to each engine
	 * (individually), and wait for the batch to complete. We can check
	 * the overhead of submitting requests to the hardware.
	 */

	mutex_lock(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);

	batch = empty_batch(i915);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_unlock;
	}

	for_each_engine(engine, i915, id) {
		IGT_TIMEOUT(end_time);
		struct i915_request *request;
		unsigned long n, prime;
		ktime_t times[2] = {};

		err = begin_live_test(&t, i915, __func__, engine->name);
		if (err)
			goto out_batch;

		/* Warmup / preload */
		request = empty_request(engine, batch);
		if (IS_ERR(request)) {
			err = PTR_ERR(request);
			goto out_batch;
		}
		i915_request_wait(request,
				  I915_WAIT_LOCKED,
				  MAX_SCHEDULE_TIMEOUT);

		for_each_prime_number_from(prime, 1, 8192) {
			times[1] = ktime_get_raw();

			for (n = 0; n < prime; n++) {
				request = empty_request(engine, batch);
				if (IS_ERR(request)) {
					err = PTR_ERR(request);
					goto out_batch;
				}
			}
			i915_request_wait(request,
					  I915_WAIT_LOCKED,
					  MAX_SCHEDULE_TIMEOUT);

			times[1] = ktime_sub(ktime_get_raw(), times[1]);
			if (prime == 1)
				times[0] = times[1];

			if (__igt_timeout(end_time, NULL))
				break;
		}

		err = end_live_test(&t);
		if (err)
			goto out_batch;

		pr_info("Batch latencies on %s: 1 = %lluns, %lu = %lluns\n",
			engine->name,
			ktime_to_ns(times[0]),
			prime, div64_u64(ktime_to_ns(times[1]), prime));
	}

out_batch:
	i915_vma_unpin(batch);
	i915_vma_put(batch);
out_unlock:
	intel_runtime_pm_put_unchecked(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

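/*
 * Build a self-referencing batch: an MI_BATCH_BUFFER_START that jumps back to
 * its own start, so the batch spins on the GPU until recursive_batch_resolve()
 * rewrites the jump into an MI_BATCH_BUFFER_END.
 */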
static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{
	struct i915_gem_context *ctx = i915->kernel_context;
	struct i915_address_space *vm =
		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
	struct drm_i915_gem_object *obj;
	const int gen = INTEL_GEN(i915);
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err;

	err = i915_gem_object_set_to_wc_domain(obj, true);
	if (err)
		goto err;

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	if (gen >= 8) {
		*cmd++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
		*cmd++ = lower_32_bits(vma->node.start);
		*cmd++ = upper_32_bits(vma->node.start);
	} else if (gen >= 6) {
		*cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
		*cmd++ = lower_32_bits(vma->node.start);
	} else {
		*cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
		*cmd++ = lower_32_bits(vma->node.start);
	}
	*cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
	i915_gem_chipset_flush(i915);

	i915_gem_object_unpin_map(obj);

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

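/*
 * Replace the self-referencing jump with MI_BATCH_BUFFER_END so that the
 * spinning batch, and every request built around it, can complete.
 */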
static int recursive_batch_resolve(struct i915_vma *batch)
{
	u32 *cmd;

	cmd = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_chipset_flush(batch->vm->i915);

	i915_gem_object_unpin_map(batch->obj);

	return 0;
}

static int live_all_engines(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_request *request[I915_NUM_ENGINES];
	struct i915_vma *batch;
	struct live_test t;
	unsigned int id;
	int err;

	/* Check we can submit requests to all engines simultaneously. We
	 * send a recursive batch to each engine - checking that we don't
	 * block doing so, and that they don't complete too soon.
	 */

	mutex_lock(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);

	err = begin_live_test(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	batch = recursive_batch(i915);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
		goto out_unlock;
	}

	for_each_engine(engine, i915, id) {
		request[id] = i915_request_alloc(engine, i915->kernel_context);
		if (IS_ERR(request[id])) {
			err = PTR_ERR(request[id]);
			pr_err("%s: Request allocation failed with err=%d\n",
			       __func__, err);
			goto out_request;
		}

		err = engine->emit_bb_start(request[id],
					    batch->node.start,
					    batch->node.size,
					    0);
		GEM_BUG_ON(err);
		request[id]->batch = batch;

		if (!i915_gem_object_has_active_reference(batch->obj)) {
			i915_gem_object_get(batch->obj);
			i915_gem_object_set_active_reference(batch->obj);
		}

		err = i915_vma_move_to_active(batch, request[id], 0);
		GEM_BUG_ON(err);

		i915_request_get(request[id]);
		i915_request_add(request[id]);
	}

	for_each_engine(engine, i915, id) {
		if (i915_request_completed(request[id])) {
			pr_err("%s(%s): request completed too early!\n",
			       __func__, engine->name);
			err = -EINVAL;
			goto out_request;
		}
	}

	err = recursive_batch_resolve(batch);
	if (err) {
		pr_err("%s: failed to resolve batch, err=%d\n", __func__, err);
		goto out_request;
	}

	for_each_engine(engine, i915, id) {
		long timeout;

		timeout = i915_request_wait(request[id],
					    I915_WAIT_LOCKED,
					    MAX_SCHEDULE_TIMEOUT);
		if (timeout < 0) {
			err = timeout;
			pr_err("%s: error waiting for request on %s, err=%d\n",
			       __func__, engine->name, err);
			goto out_request;
		}

		GEM_BUG_ON(!i915_request_completed(request[id]));
		i915_request_put(request[id]);
		request[id] = NULL;
	}

	err = end_live_test(&t);

out_request:
	for_each_engine(engine, i915, id)
		if (request[id])
			i915_request_put(request[id]);
	i915_vma_unpin(batch);
	i915_vma_put(batch);
out_unlock:
	intel_runtime_pm_put_unchecked(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int live_sequential_engines(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_request *request[I915_NUM_ENGINES] = {};
	struct i915_request *prev = NULL;
	struct intel_engine_cs *engine;
	struct live_test t;
	unsigned int id;
	int err;

	/* Check we can submit requests to all engines sequentially, such
	 * that each successive request waits for the earlier ones. This
	 * tests that we don't execute requests out of order, even though
	 * they are running on independent engines.
	 */

	mutex_lock(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);

	err = begin_live_test(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	for_each_engine(engine, i915, id) {
		struct i915_vma *batch;

		batch = recursive_batch(i915);
		if (IS_ERR(batch)) {
			err = PTR_ERR(batch);
			pr_err("%s: Unable to create batch for %s, err=%d\n",
			       __func__, engine->name, err);
			goto out_unlock;
		}

		request[id] = i915_request_alloc(engine, i915->kernel_context);
		if (IS_ERR(request[id])) {
			err = PTR_ERR(request[id]);
			pr_err("%s: Request allocation failed for %s with err=%d\n",
			       __func__, engine->name, err);
			goto out_request;
		}

		if (prev) {
			err = i915_request_await_dma_fence(request[id],
							   &prev->fence);
			if (err) {
				i915_request_add(request[id]);
				pr_err("%s: Request await failed for %s with err=%d\n",
				       __func__, engine->name, err);
				goto out_request;
			}
		}

		err = engine->emit_bb_start(request[id],
					    batch->node.start,
					    batch->node.size,
					    0);
		GEM_BUG_ON(err);
		request[id]->batch = batch;

		err = i915_vma_move_to_active(batch, request[id], 0);
		GEM_BUG_ON(err);

		i915_gem_object_set_active_reference(batch->obj);
		i915_vma_get(batch);

		i915_request_get(request[id]);
		i915_request_add(request[id]);

		prev = request[id];
	}

	for_each_engine(engine, i915, id) {
		long timeout;

		if (i915_request_completed(request[id])) {
			pr_err("%s(%s): request completed too early!\n",
			       __func__, engine->name);
			err = -EINVAL;
			goto out_request;
		}

		err = recursive_batch_resolve(request[id]->batch);
		if (err) {
			pr_err("%s: failed to resolve batch, err=%d\n",
			       __func__, err);
			goto out_request;
		}

		timeout = i915_request_wait(request[id],
					    I915_WAIT_LOCKED,
					    MAX_SCHEDULE_TIMEOUT);
		if (timeout < 0) {
			err = timeout;
			pr_err("%s: error waiting for request on %s, err=%d\n",
			       __func__, engine->name, err);
			goto out_request;
		}

		GEM_BUG_ON(!i915_request_completed(request[id]));
	}

	err = end_live_test(&t);

out_request:
	for_each_engine(engine, i915, id) {
		u32 *cmd;

		if (!request[id])
			break;

		cmd = i915_gem_object_pin_map(request[id]->batch->obj,
					      I915_MAP_WC);
		if (!IS_ERR(cmd)) {
			*cmd = MI_BATCH_BUFFER_END;
			i915_gem_chipset_flush(i915);

			i915_gem_object_unpin_map(request[id]->batch->obj);
		}

		i915_vma_put(request[id]->batch);
		i915_request_put(request[id]);
	}
out_unlock:
	intel_runtime_pm_put_unchecked(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

int i915_request_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_nop_request),
		SUBTEST(live_all_engines),
		SUBTEST(live_sequential_engines),
		SUBTEST(live_empty_request),
	};

	if (i915_terminally_wedged(&i915->gpu_error))
		return 0;

	return i915_subtests(tests, i915);
}