/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/prefetch.h>
#include <linux/dma-fence-array.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>

#include "i915_drv.h"

static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
	return "i915";
}

static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
	/* The timeline struct (as part of the ppgtt underneath a context)
	 * may be freed when the request is no longer in use by the GPU.
	 * We could extend the life of a context to beyond that of all
	 * fences, possibly keeping the hw resource around indefinitely,
	 * or we just give them a false name. Since
	 * dma_fence_ops.get_timeline_name is a debug feature, the occasional
	 * lie seems justifiable.
	 */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return "signaled";

	return to_request(fence)->timeline->common->name;
}

static bool i915_fence_signaled(struct dma_fence *fence)
{
	return i915_gem_request_completed(to_request(fence));
}

static bool i915_fence_enable_signaling(struct dma_fence *fence)
{
	if (i915_fence_signaled(fence))
		return false;

	intel_engine_enable_signaling(to_request(fence));
	return true;
}

static signed long i915_fence_wait(struct dma_fence *fence,
				   bool interruptible,
				   signed long timeout)
{
	return i915_wait_request(to_request(fence), interruptible, timeout);
}

static void i915_fence_release(struct dma_fence *fence)
{
	struct drm_i915_gem_request *req = to_request(fence);

	/* The request is put onto an RCU freelist (i.e. the address
	 * is immediately reused), mark the fences as being freed now.
	 * Otherwise the debugobjects for the fences are only marked as
	 * freed when the slab cache itself is freed, and so we would get
	 * caught trying to reuse dead objects.
	 */
	i915_sw_fence_fini(&req->submit);

	kmem_cache_free(req->i915->requests, req);
}

const struct dma_fence_ops i915_fence_ops = {
	.get_driver_name = i915_fence_get_driver_name,
	.get_timeline_name = i915_fence_get_timeline_name,
	.enable_signaling = i915_fence_enable_signaling,
	.signaled = i915_fence_signaled,
	.wait = i915_fence_wait,
	.release = i915_fence_release,
};

static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
	struct drm_i915_file_private *file_priv;

	file_priv = request->file_priv;
	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	if (request->file_priv) {
		list_del(&request->client_link);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}

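/*
 * Dependency tracking for the scheduler: each i915_priotree records which
 * requests must be signaled before this one may execute (signalers_list)
 * and which requests are waiting on it (waiters_list). These links are
 * walked, e.g., when adjusting priorities via engine->schedule().
 */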
static struct i915_dependency *
i915_dependency_alloc(struct drm_i915_private *i915)
{
	return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
}

static void
i915_dependency_free(struct drm_i915_private *i915,
		     struct i915_dependency *dep)
{
	kmem_cache_free(i915->dependencies, dep);
}

static void
__i915_priotree_add_dependency(struct i915_priotree *pt,
			       struct i915_priotree *signal,
			       struct i915_dependency *dep,
			       unsigned long flags)
{
	INIT_LIST_HEAD(&dep->dfs_link);
	list_add(&dep->wait_link, &signal->waiters_list);
	list_add(&dep->signal_link, &pt->signalers_list);
	dep->signaler = signal;
	dep->flags = flags;
}

static int
i915_priotree_add_dependency(struct drm_i915_private *i915,
			     struct i915_priotree *pt,
			     struct i915_priotree *signal)
{
	struct i915_dependency *dep;

	dep = i915_dependency_alloc(i915);
	if (!dep)
		return -ENOMEM;

	__i915_priotree_add_dependency(pt, signal, dep, I915_DEPENDENCY_ALLOC);
	return 0;
}

static void
i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
{
	struct i915_dependency *dep, *next;

	GEM_BUG_ON(!RB_EMPTY_NODE(&pt->node));

	/* Everyone we depended upon (the fences we wait to be signaled)
	 * should retire before us and remove themselves from our list.
	 * However, retirement is run independently on each timeline and
	 * so we may be called out-of-order.
	 */
	list_for_each_entry_safe(dep, next, &pt->signalers_list, signal_link) {
		list_del(&dep->wait_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(i915, dep);
	}

	/* Remove ourselves from everyone who depends upon us */
	list_for_each_entry_safe(dep, next, &pt->waiters_list, wait_link) {
		list_del(&dep->signal_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(i915, dep);
	}
}

static void
i915_priotree_init(struct i915_priotree *pt)
{
	INIT_LIST_HEAD(&pt->signalers_list);
	INIT_LIST_HEAD(&pt->waiters_list);
	RB_CLEAR_NODE(&pt->node);
	pt->priority = INT_MIN;
}

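/*
 * Rewind the global seqno on every engine to @seqno. The GPU is idled first
 * and the breadcrumb signalers are allowed to drain, so that no waiter is
 * still watching an old seqno when the new value is written.
 */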
static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret;

	/* Carefully retire all requests without writing to the rings */
	ret = i915_gem_wait_for_idle(i915,
				     I915_WAIT_INTERRUPTIBLE |
				     I915_WAIT_LOCKED);
	if (ret)
		return ret;

	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
	for_each_engine(engine, i915, id) {
		struct i915_gem_timeline *timeline;
		struct intel_timeline *tl = engine->timeline;

		if (!i915_seqno_passed(seqno, tl->seqno)) {
			/* spin until threads are complete */
			while (intel_breadcrumbs_busy(engine))
				cond_resched();
		}

		/* Finally reset hw state */
		intel_engine_init_global_seqno(engine, seqno);
		tl->seqno = seqno;

		list_for_each_entry(timeline, &i915->gt.timelines, link)
			memset(timeline->engine[id].sync_seqno, 0,
			       sizeof(timeline->engine[id].sync_seqno));
	}

	return 0;
}

int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (seqno == 0)
		return -EINVAL;

	/* The HWS page needs to be set to a value less than what we
	 * will inject into the ring.
	 */
	return reset_all_global_seqno(dev_priv, seqno - 1);
}

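/*
 * Reserve space for one more in-flight seqno on this engine. If adding the
 * new request would wrap the 32-bit seqno space, idle the GPU and reset all
 * timelines back to zero before proceeding.
 */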
static int reserve_seqno(struct intel_engine_cs *engine)
{
	u32 active = ++engine->timeline->inflight_seqnos;
	u32 seqno = engine->timeline->seqno;
	int ret;

	/* Reservation is fine until we need to wrap around */
	if (likely(!add_overflows(seqno, active)))
		return 0;

	ret = reset_all_global_seqno(engine->i915, 0);
	if (ret) {
		engine->timeline->inflight_seqnos--;
		return ret;
	}

	return 0;
}

static void unreserve_seqno(struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!engine->timeline->inflight_seqnos);
	engine->timeline->inflight_seqnos--;
}

void i915_gem_retire_noop(struct i915_gem_active *active,
			  struct drm_i915_gem_request *request)
{
	/* Space left intentionally blank */
}

static void advance_ring(struct drm_i915_gem_request *request)
{
	unsigned int tail;

	/* We know the GPU must have read the request to have
	 * sent us the seqno + interrupt, so use the position
	 * of tail of the request to update the last known position
	 * of the GPU head.
	 *
	 * Note this requires that we are always called in request
	 * completion order.
	 */
	if (list_is_last(&request->ring_link, &request->ring->request_list))
		tail = request->ring->tail;
	else
		tail = request->postfix;
	list_del(&request->ring_link);

	request->ring->head = tail;
}

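/* Release the error-state capture list attached to the request, if any. */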
static void free_capture_list(struct drm_i915_gem_request *request)
{
	struct i915_gem_capture_list *capture;

	capture = request->capture_list;
	while (capture) {
		struct i915_gem_capture_list *next = capture->next;

		kfree(capture);
		capture = next;
	}
}

static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct i915_gem_active *active, *next;

	lockdep_assert_held(&request->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
	GEM_BUG_ON(!i915_gem_request_completed(request));
	GEM_BUG_ON(!request->i915->gt.active_requests);

	trace_i915_gem_request_retire(request);

	spin_lock_irq(&engine->timeline->lock);
	list_del_init(&request->link);
	spin_unlock_irq(&engine->timeline->lock);

	if (!--request->i915->gt.active_requests) {
		GEM_BUG_ON(!request->i915->gt.awake);
		mod_delayed_work(request->i915->wq,
				 &request->i915->gt.idle_work,
				 msecs_to_jiffies(100));
	}
	unreserve_seqno(request->engine);
	advance_ring(request);

	free_capture_list(request);

	/* Walk through the active list, calling retire on each. This allows
	 * objects to track their GPU activity and mark themselves as idle
	 * when their *last* active request is completed (updating state
	 * tracking lists for eviction, active references for GEM, etc).
	 *
	 * As the ->retire() may free the node, we decouple it first and
	 * pass along the auxiliary information (to avoid dereferencing
	 * the node after the callback).
	 */
	list_for_each_entry_safe(active, next, &request->active_list, link) {
		/* In microbenchmarks or focusing upon time inside the kernel,
		 * we may spend an inordinate amount of time simply handling
		 * the retirement of requests and processing their callbacks.
		 * Of which, this loop itself is particularly hot due to the
		 * cache misses when jumping around the list of i915_gem_active.
		 * So we try to keep this loop as streamlined as possible and
		 * also prefetch the next i915_gem_active to try and hide
		 * the likely cache miss.
		 */
		prefetchw(next);

		INIT_LIST_HEAD(&active->link);
		RCU_INIT_POINTER(active->request, NULL);

		active->retire(active, request);
	}

	i915_gem_request_remove_from_client(request);

	/* Retirement decays the ban score as it is a sign of ctx progress */
	if (request->ctx->ban_score > 0)
		request->ctx->ban_score--;

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. However, since we
	 * cannot take the required locks at i915_gem_request_submit() we
	 * defer the unpinning of the active context to now, retirement of
	 * the subsequent request.
	 */
	if (engine->last_retired_context)
		engine->context_unpin(engine, engine->last_retired_context);
	engine->last_retired_context = request->ctx;

	dma_fence_signal(&request->fence);

	i915_priotree_fini(request->i915, &request->priotree);
	i915_gem_request_put(request);
}

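/*
 * Retire all requests on @req's engine timeline up to and including @req,
 * which must already have been completed by the GPU.
 */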
void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_gem_request *tmp;

	lockdep_assert_held(&req->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_gem_request_completed(req));

	if (list_empty(&req->link))
		return;

	do {
		tmp = list_first_entry(&engine->timeline->requests,
				       typeof(*tmp), link);

		i915_gem_request_retire(tmp);
	} while (tmp != req);
}

static u32 timeline_get_seqno(struct intel_timeline *tl)
{
	return ++tl->seqno;
}

void __i915_gem_request_submit(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_timeline *timeline;
	u32 seqno;

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&engine->timeline->lock);

	trace_i915_gem_request_execute(request);

	/* Transfer from per-context onto the global per-engine timeline */
	timeline = engine->timeline;
	GEM_BUG_ON(timeline == request->timeline);

	seqno = timeline_get_seqno(timeline);
	GEM_BUG_ON(!seqno);
	GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));

	/* We may be recursing from the signal callback of another i915 fence */
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
	request->global_seqno = seqno;
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
		intel_engine_enable_signaling(request);
	spin_unlock(&request->lock);

	engine->emit_breadcrumb(request,
				request->ring->vaddr + request->postfix);

	spin_lock(&request->timeline->lock);
	list_move_tail(&request->link, &timeline->requests);
	spin_unlock(&request->timeline->lock);

	wake_up_all(&request->execute);
}

void i915_gem_request_submit(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->timeline->lock, flags);

	__i915_gem_request_submit(request);

	spin_unlock_irqrestore(&engine->timeline->lock, flags);
}

void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_timeline *timeline;

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&engine->timeline->lock);

	/* Only unwind in reverse order, required so that the per-context list
	 * is kept in seqno/ring order.
	 */
	GEM_BUG_ON(request->global_seqno != engine->timeline->seqno);
	engine->timeline->seqno--;

	/* We may be recursing from the signal callback of another i915 fence */
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
	request->global_seqno = 0;
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
		intel_engine_cancel_signaling(request);
	spin_unlock(&request->lock);

	/* Transfer back from the global per-engine timeline to per-context */
	timeline = request->timeline;
	GEM_BUG_ON(timeline == engine->timeline);

	spin_lock(&timeline->lock);
	list_move(&request->link, &timeline->requests);
	spin_unlock(&timeline->lock);

	/* We don't need to wake_up any waiters on request->execute, they
	 * will get woken by any other event or by us re-adding this request
	 * to the engine timeline (__i915_gem_request_submit()). The waiters
	 * should be quite adept at finding that the request now has a new
	 * global_seqno compared to the one they went to sleep on.
	 */
}

void i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->timeline->lock, flags);

	__i915_gem_request_unsubmit(request);

	spin_unlock_irqrestore(&engine->timeline->lock, flags);
}

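/*
 * Callback for the request's submit fence: once all dependencies have been
 * signaled (FENCE_COMPLETE), hand the request to the engine backend for
 * execution; on FENCE_FREE, drop the reference held by the fence chain.
 */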
static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct drm_i915_gem_request *request =
		container_of(fence, typeof(*request), submit);

	switch (state) {
	case FENCE_COMPLETE:
		trace_i915_gem_request_submit(request);
		request->engine->submit_request(request);
		break;

	case FENCE_FREE:
		i915_gem_request_put(request);
		break;
	}

	return NOTIFY_DONE;
}

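/*
 * Typical request lifecycle, as a simplified sketch (error handling and the
 * execbuf-specific steps are omitted):
 *
 *	req = i915_gem_request_alloc(engine, ctx);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	... await prerequisites, emit commands into req->ring ...
 *	__i915_add_request(req, true);
 *
 * Execution is then driven by the submit fence, and the request is retired
 * once the GPU has completed it.
 */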
/**
 * i915_gem_request_alloc - allocate a request structure
 *
 * @engine: engine that we wish to issue the request on.
 * @ctx: context that the request will be associated with.
 *       This can be NULL if the request is not directly related to
 *       any specific user context, in which case this function will
 *       choose an appropriate context to use.
 *
 * Returns a pointer to the allocated request if successful,
 * or an error code if not.
 */
struct drm_i915_gem_request *
i915_gem_request_alloc(struct intel_engine_cs *engine,
		       struct i915_gem_context *ctx)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct drm_i915_gem_request *req;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
	 * EIO if the GPU is already wedged.
	 */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return ERR_PTR(-EIO);

	/* Pinning the contexts may generate requests in order to acquire
	 * GGTT space, so do this first before we reserve a seqno for
	 * ourselves.
	 */
	ret = engine->context_pin(engine, ctx);
	if (ret)
		return ERR_PTR(ret);

	ret = reserve_seqno(engine);
	if (ret)
		goto err_unpin;

	/* Move the oldest request to the slab-cache (if not in use!) */
	req = list_first_entry_or_null(&engine->timeline->requests,
				       typeof(*req), link);
	if (req && i915_gem_request_completed(req))
		i915_gem_request_retire(req);

	/* Beware: Dragons be flying overhead.
	 *
	 * We use RCU to look up requests in flight. The lookups may
	 * race with the request being allocated from the slab freelist.
	 * That is, the request we are writing to here may be in the process
	 * of being read by __i915_gem_active_get_rcu(). As such,
	 * we have to be very careful when overwriting the contents. During
	 * the RCU lookup, we chase the request->engine pointer,
	 * read the request->global_seqno and increment the reference count.
	 *
	 * The reference count is incremented atomically. If it is zero,
	 * the lookup knows the request is unallocated and complete. Otherwise,
	 * it is either still in use, or has been reallocated and reset
	 * with dma_fence_init(). This increment is safe for release as we
	 * check that the request we have a reference to matches the active
	 * request.
	 *
	 * Before we increment the refcount, we chase the request->engine
	 * pointer. We must not call kmem_cache_zalloc() or else we set
	 * that pointer to NULL and cause a crash during the lookup. If
	 * we see the request is completed (based on the value of the
	 * old engine and seqno), the lookup is complete and reports NULL.
	 * If we decide the request is not completed (new engine or seqno),
	 * then we grab a reference and double check that it is still the
	 * active request - which it won't be and restart the lookup.
	 *
	 * Do not use kmem_cache_zalloc() here!
	 */
	req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto err_unreserve;
	}

	req->timeline = i915_gem_context_lookup_timeline(ctx, engine);
	GEM_BUG_ON(req->timeline == engine->timeline);

	spin_lock_init(&req->lock);
	dma_fence_init(&req->fence,
		       &i915_fence_ops,
		       &req->lock,
		       req->timeline->fence_context,
		       timeline_get_seqno(req->timeline));

	/* We bump the ref for the fence chain */
	i915_sw_fence_init(&i915_gem_request_get(req)->submit, submit_notify);
	init_waitqueue_head(&req->execute);

	i915_priotree_init(&req->priotree);

	INIT_LIST_HEAD(&req->active_list);
	req->i915 = dev_priv;
	req->engine = engine;
	req->ctx = ctx;

	/* No zalloc, must clear what we need by hand */
	req->global_seqno = 0;
	req->file_priv = NULL;
	req->batch = NULL;
	req->capture_list = NULL;

	/*
	 * Reserve space in the ring buffer for all the commands required to
	 * eventually emit this request. This is to guarantee that the
	 * i915_add_request() call can't fail. Note that the reserve may need
	 * to be redone if the request is not actually submitted straight
	 * away, e.g. because a GPU scheduler has deferred it.
	 */
	req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
	GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz);

	ret = engine->request_alloc(req);
	if (ret)
		goto err_ctx;

	/* Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	req->head = req->ring->tail;

	/* Check that we didn't interrupt ourselves with a new request */
	GEM_BUG_ON(req->timeline->seqno != req->fence.seqno);
	return req;

err_ctx:
	/* Make sure we didn't add ourselves to external state before freeing */
	GEM_BUG_ON(!list_empty(&req->active_list));
	GEM_BUG_ON(!list_empty(&req->priotree.signalers_list));
	GEM_BUG_ON(!list_empty(&req->priotree.waiters_list));

	kmem_cache_free(dev_priv->requests, req);
err_unreserve:
	unreserve_seqno(engine);
err_unpin:
	engine->context_unpin(engine, ctx);
	return ERR_PTR(ret);
}

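/*
 * Order the execution of @to after @from. A scheduler dependency is recorded
 * when the backend supports scheduling; in addition, depending on whether the
 * requests share a timeline or an engine, we rely on natural ordering, chain
 * the submit fences, wait on the dma-fence, or use a semaphore/busy-wait
 * keyed on @from's global seqno.
 */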
static int
i915_gem_request_await_request(struct drm_i915_gem_request *to,
			       struct drm_i915_gem_request *from)
{
	u32 seqno;
	int ret;

	GEM_BUG_ON(to == from);

	if (i915_gem_request_completed(from))
		return 0;

	if (to->engine->schedule) {
		ret = i915_priotree_add_dependency(to->i915,
						   &to->priotree,
						   &from->priotree);
		if (ret < 0)
			return ret;
	}

	if (to->timeline == from->timeline)
		return 0;

	if (to->engine == from->engine) {
		ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
						       &from->submit,
						       GFP_KERNEL);
		return ret < 0 ? ret : 0;
	}

	seqno = i915_gem_request_global_seqno(from);
	if (!seqno) {
		ret = i915_sw_fence_await_dma_fence(&to->submit,
						    &from->fence, 0,
						    GFP_KERNEL);
		return ret < 0 ? ret : 0;
	}

	if (seqno <= to->timeline->sync_seqno[from->engine->id])
		return 0;

	trace_i915_gem_ring_sync_to(to, from);
	if (!i915.semaphores) {
		if (!i915_spin_request(from, TASK_INTERRUPTIBLE, 2)) {
			ret = i915_sw_fence_await_dma_fence(&to->submit,
							    &from->fence, 0,
							    GFP_KERNEL);
			if (ret < 0)
				return ret;
		}
	} else {
		ret = to->engine->semaphore.sync_to(to, from);
		if (ret)
			return ret;
	}

	to->timeline->sync_seqno[from->engine->id] = seqno;
	return 0;
}

int
i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
				 struct dma_fence *fence)
{
	struct dma_fence_array *array;
	int ret;
	int i;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return 0;

	if (dma_fence_is_i915(fence))
		return i915_gem_request_await_request(req, to_request(fence));

	if (!dma_fence_is_array(fence)) {
		ret = i915_sw_fence_await_dma_fence(&req->submit,
						    fence, I915_FENCE_TIMEOUT,
						    GFP_KERNEL);
		return ret < 0 ? ret : 0;
	}

	/* Note that if the fence-array was created in signal-on-any mode,
	 * we should *not* decompose it into its individual fences. However,
	 * we don't currently store which mode the fence-array is operating
	 * in. Fortunately, the only user of signal-on-any is private to
	 * amdgpu and we should not see any incoming fence-array from
	 * sync-file being in signal-on-any mode.
	 */

	array = to_dma_fence_array(fence);
	for (i = 0; i < array->num_fences; i++) {
		struct dma_fence *child = array->fences[i];

		if (dma_fence_is_i915(child))
			ret = i915_gem_request_await_request(req,
							     to_request(child));
		else
			ret = i915_sw_fence_await_dma_fence(&req->submit,
							    child, I915_FENCE_TIMEOUT,
							    GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/**
 * i915_gem_request_await_object - set this request to (async) wait upon a bo
 *
 * @to: request we are wishing to use
 * @obj: object which may be in use on another ring.
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Conceptually we serialise writes between engines inside the GPU.
 * We only allow one engine to write into a buffer at any time, but
 * multiple readers. To ensure each has a coherent view of memory, we must:
 *
 * - If there is an outstanding write request to the object, the new
 *   request must wait for it to complete (either CPU or in hw, requests
 *   on the same ring will be naturally ordered).
 *
 * - If we are a write request (pending_write_domain is set), the new
 *   request must wait for outstanding read requests to complete.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_gem_request_await_object(struct drm_i915_gem_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write)
{
	struct dma_fence *excl;
	int ret = 0;

	if (write) {
		struct dma_fence **shared;
		unsigned int count, i;

		ret = reservation_object_get_fences_rcu(obj->resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			ret = i915_gem_request_await_dma_fence(to, shared[i]);
			if (ret)
				break;

			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(obj->resv);
	}

	if (excl) {
		if (ret == 0)
			ret = i915_gem_request_await_dma_fence(to, excl);

		dma_fence_put(excl);
	}

	return ret;
}

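/*
 * First request after a period of idleness: mark the GT as awake, taking a
 * runtime-pm wakeref and re-enabling the powersaving features, and queue the
 * periodic retire worker.
 */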
static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (dev_priv->gt.awake)
		return;

	GEM_BUG_ON(!dev_priv->gt.active_requests);

	intel_runtime_pm_get_noresume(dev_priv);
	dev_priv->gt.awake = true;

	intel_enable_gt_powersave(dev_priv);
	i915_update_gfx_val(dev_priv);
	if (INTEL_GEN(dev_priv) >= 6)
		gen6_rps_busy(dev_priv);

	queue_delayed_work(dev_priv->wq,
			   &dev_priv->gt.retire_work,
			   round_jiffies_up_relative(HZ));
}

/*
 * NB: This function is not allowed to fail. Doing so would mean that the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_ring *ring = request->ring;
	struct intel_timeline *timeline = request->timeline;
	struct drm_i915_gem_request *prev;
	u32 *cs;
	int err;

	lockdep_assert_held(&request->i915->drm.struct_mutex);
	trace_i915_gem_request_add(request);

	/* Make sure that no request gazumped us - if it was allocated after
	 * our i915_gem_request_alloc() and called __i915_add_request() before
	 * us, the timeline will hold its seqno which is later than ours.
	 */
	GEM_BUG_ON(timeline->seqno != request->fence.seqno);

	/*
	 * To ensure that this call will not fail, space for its emissions
	 * should already have been reserved in the ring buffer. Let the ring
	 * know that it is time to use that space up.
	 */
	request->reserved_space = 0;

	/*
	 * Emit any outstanding flushes - execbuf can fail to emit the flush
	 * after having emitted the batchbuffer command. Hence we need to fix
	 * things up similar to emitting the lazy request. The difference here
	 * is that the flush _must_ happen before the next request, no matter
	 * what.
	 */
	if (flush_caches) {
		err = engine->emit_flush(request, EMIT_FLUSH);

		/* Not allowed to fail! */
		WARN(err, "engine->emit_flush() failed: %d!\n", err);
	}

	/* Record the position of the start of the breadcrumb so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the ring's HEAD.
	 */
	cs = intel_ring_begin(request, engine->emit_breadcrumb_sz);
	GEM_BUG_ON(IS_ERR(cs));
	request->postfix = intel_ring_offset(request, cs);

	/* Seal the request and mark it as pending execution. Note that
	 * we may inspect this state, without holding any locks, during
	 * hangcheck. Hence we apply the barrier to ensure that we do not
	 * see a more recent value in the hws than we are tracking.
	 */

	prev = i915_gem_active_raw(&timeline->last_request,
				   &request->i915->drm.struct_mutex);
	if (prev) {
		i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
					     &request->submitq);
		if (engine->schedule)
			__i915_priotree_add_dependency(&request->priotree,
						       &prev->priotree,
						       &request->dep,
						       0);
	}

	spin_lock_irq(&timeline->lock);
	list_add_tail(&request->link, &timeline->requests);
	spin_unlock_irq(&timeline->lock);

	GEM_BUG_ON(timeline->seqno != request->fence.seqno);
	i915_gem_active_set(&timeline->last_request, request);

	list_add_tail(&request->ring_link, &ring->request_list);
	request->emitted_jiffies = jiffies;

	if (!request->i915->gt.active_requests++)
		i915_gem_mark_busy(engine);

	/* Let the backend know a new request has arrived that may need
	 * to adjust the existing execution schedule due to a high priority
	 * request - i.e. we may want to preempt the current request in order
	 * to run a high priority dependency chain *before* we can execute this
	 * request.
	 *
	 * This is called before the request is ready to run so that we can
	 * decide whether to preempt the entire chain so that it is ready to
	 * run at the earliest possible convenience.
	 */
	if (engine->schedule)
		engine->schedule(request, request->ctx->priority);

	local_bh_disable();
	i915_sw_fence_commit(&request->submit);
	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
}

static unsigned long local_clock_us(unsigned int *cpu)
{
	unsigned long t;

	/* Cheaply and approximately convert from nanoseconds to microseconds.
	 * The result and subsequent calculations are also defined in the same
	 * approximate microseconds units. The principal source of timing
	 * error here is from the simple truncation.
	 *
	 * Note that local_clock() is only defined wrt to the current CPU;
	 * the comparisons are no longer valid if we switch CPUs. Instead of
	 * blocking preemption for the entire busywait, we can detect the CPU
	 * switch and use that as indicator of system load and a reason to
	 * stop busywaiting, see busywait_stop().
	 */
	*cpu = get_cpu();
	t = local_clock() >> 10;
	put_cpu();

	return t;
}

static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
	unsigned int this_cpu;

	if (time_after(local_clock_us(&this_cpu), timeout))
		return true;

	return this_cpu != cpu;
}

bool __i915_spin_request(const struct drm_i915_gem_request *req,
			 u32 seqno, int state, unsigned long timeout_us)
{
	struct intel_engine_cs *engine = req->engine;
	unsigned int irq, cpu;

	/* When waiting for high frequency requests, e.g. during synchronous
	 * rendering split between the CPU and GPU, the finite amount of time
	 * required to set up the irq and wait upon it limits the response
	 * rate. By busywaiting on the request completion for a short while we
	 * can service the high frequency waits as quick as possible. However,
	 * if it is a slow request, we want to sleep as quickly as possible.
	 * The tradeoff between waiting and sleeping is roughly the time it
	 * takes to sleep on a request, on the order of a microsecond.
	 */

	irq = atomic_read(&engine->irq_count);
	timeout_us += local_clock_us(&cpu);
	do {
		if (seqno != i915_gem_request_global_seqno(req))
			break;

		if (i915_seqno_passed(intel_engine_get_seqno(req->engine),
				      seqno))
			return true;

		/* Seqnos are meant to be ordered *before* the interrupt. If
		 * we see an interrupt without a corresponding seqno advance,
		 * assume we won't see one in the near future but require
		 * the engine->seqno_barrier() to fixup coherency.
		 */
		if (atomic_read(&engine->irq_count) != irq)
			break;

		if (signal_pending_state(state, current))
			break;

		if (busywait_stop(timeout_us, cpu))
			break;

		cpu_relax();
	} while (!need_resched());

	return false;
}

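/*
 * If a GPU reset has been handed off to the waiter, drop back to TASK_RUNNING
 * and perform the reset ourselves before continuing to wait.
 */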
static bool __i915_wait_request_check_and_reset(struct drm_i915_gem_request *request)
{
	if (likely(!i915_reset_handoff(&request->i915->gpu_error)))
		return false;

	__set_current_state(TASK_RUNNING);
	i915_reset(request->i915);
	return true;
}

/**
 * i915_wait_request - wait until execution of request has finished
 * @req: the request to wait upon
 * @flags: how to wait
 * @timeout: how long to wait in jiffies
 *
 * i915_wait_request() waits for the request to be completed, for a
 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
 * unbounded wait).
 *
 * If the caller holds the struct_mutex, the caller must pass I915_WAIT_LOCKED
 * in via the flags, and vice versa if the struct_mutex is not held, the caller
 * must not specify that the wait is locked.
 *
 * Returns the remaining time (in jiffies) if the request completed, which may
 * be zero or -ETIME if the request is unfinished after the timeout expires.
 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
 * pending before the request completes.
 */
long i915_wait_request(struct drm_i915_gem_request *req,
		       unsigned int flags,
		       long timeout)
{
	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	wait_queue_head_t *errq = &req->i915->gpu_error.wait_queue;
	DEFINE_WAIT_FUNC(reset, default_wake_function);
	DEFINE_WAIT_FUNC(exec, default_wake_function);
	struct intel_wait wait;

	might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
	GEM_BUG_ON(debug_locks &&
		   !!lockdep_is_held(&req->i915->drm.struct_mutex) !=
		   !!(flags & I915_WAIT_LOCKED));
#endif
	GEM_BUG_ON(timeout < 0);

	if (i915_gem_request_completed(req))
		return timeout;

	if (!timeout)
		return -ETIME;

	trace_i915_gem_request_wait_begin(req, flags);

	add_wait_queue(&req->execute, &exec);
	if (flags & I915_WAIT_LOCKED)
		add_wait_queue(errq, &reset);

	intel_wait_init(&wait, req);

restart:
	do {
		set_current_state(state);
		if (intel_wait_update_request(&wait, req))
			break;

		if (flags & I915_WAIT_LOCKED &&
		    __i915_wait_request_check_and_reset(req))
			continue;

		if (signal_pending_state(state, current)) {
			timeout = -ERESTARTSYS;
			goto complete;
		}

		if (!timeout) {
			timeout = -ETIME;
			goto complete;
		}

		timeout = io_schedule_timeout(timeout);
	} while (1);

	GEM_BUG_ON(!intel_wait_has_seqno(&wait));
	GEM_BUG_ON(!i915_sw_fence_signaled(&req->submit));

	/* Optimistic short spin before touching IRQs */
	if (i915_spin_request(req, state, 5))
		goto complete;

	set_current_state(state);
	if (intel_engine_add_wait(req->engine, &wait))
		/* In order to check that we haven't missed the interrupt
		 * as we enabled it, we need to kick ourselves to do a
		 * coherent check on the seqno before we sleep.
		 */
		goto wakeup;

	if (flags & I915_WAIT_LOCKED)
		__i915_wait_request_check_and_reset(req);

	for (;;) {
		if (signal_pending_state(state, current)) {
			timeout = -ERESTARTSYS;
			break;
		}

		if (!timeout) {
			timeout = -ETIME;
			break;
		}

		timeout = io_schedule_timeout(timeout);

		if (intel_wait_complete(&wait) &&
		    intel_wait_check_request(&wait, req))
			break;

		set_current_state(state);

wakeup:
		/* Carefully check if the request is complete, giving time
		 * for the seqno to be visible following the interrupt.
		 * We also have to check in case we are kicked by the GPU
		 * reset in order to drop the struct_mutex.
		 */
		if (__i915_request_irq_complete(req))
			break;

		/* If the GPU is hung, and we hold the lock, reset the GPU
		 * and then check for completion. On a full reset, the engine's
		 * HW seqno will be advanced past us and we are complete.
		 * If we do a partial reset, we have to wait for the GPU to
		 * resume and update the breadcrumb.
		 *
		 * If we don't hold the mutex, we can just wait for the worker
		 * to come along and update the breadcrumb (either directly
		 * itself, or indirectly by recovering the GPU).
		 */
		if (flags & I915_WAIT_LOCKED &&
		    __i915_wait_request_check_and_reset(req))
			continue;

		/* Only spin if we know the GPU is processing this request */
		if (i915_spin_request(req, state, 2))
			break;

		if (!intel_wait_check_request(&wait, req)) {
			intel_engine_remove_wait(req->engine, &wait);
			goto restart;
		}
	}

	intel_engine_remove_wait(req->engine, &wait);
complete:
	__set_current_state(TASK_RUNNING);
	if (flags & I915_WAIT_LOCKED)
		remove_wait_queue(errq, &reset);
	remove_wait_queue(&req->execute, &exec);
	trace_i915_gem_request_wait_end(req);

	return timeout;
}

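/*
 * Retire, in order, every request on this engine's timeline that the GPU has
 * completed. The requests are unlinked under the timeline lock and retired
 * outside of it.
 */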
static void engine_retire_requests(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request, *next;
	u32 seqno = intel_engine_get_seqno(engine);
	LIST_HEAD(retire);

	spin_lock_irq(&engine->timeline->lock);
	list_for_each_entry_safe(request, next,
				 &engine->timeline->requests, link) {
		if (!i915_seqno_passed(seqno, request->global_seqno))
			break;

		list_move_tail(&request->link, &retire);
	}
	spin_unlock_irq(&engine->timeline->lock);

	list_for_each_entry_safe(request, next, &retire, link)
		i915_gem_request_retire(request);
}

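/*
 * Retire completed requests across all engines; called under struct_mutex,
 * e.g. from the periodic retire worker.
 */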
void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (!dev_priv->gt.active_requests)
		return;

	for_each_engine(engine, dev_priv, id)
		engine_retire_requests(engine);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c"
#include "selftests/i915_gem_request.c"
#endif