/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Michel Thierry <michel.thierry@intel.com>
 *    Thomas Daniel <thomas.daniel@intel.com>
 *    Oscar Mateo <oscar.mateo@intel.com>
 *
 */

/**
 * DOC: Logical Rings, Logical Ring Contexts and Execlists
 *
 * Motivation:
 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
 * These expanded contexts enable a number of new abilities, especially
 * "Execlists" (also implemented in this file).
 *
 * One of the main differences with the legacy HW contexts is that logical
 * ring contexts incorporate many more things into the context's state, like
 * PDPs or ringbuffer control registers:
 *
 * The reason why PDPs are included in the context is straightforward: as
 * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
 * contained there means you don't need to do a ppgtt->switch_mm yourself;
 * instead, the GPU will do it for you on the context switch.
 *
 * But, what about the ringbuffer control registers (head, tail, etc..)?
 * shouldn't we just need a set of those per engine command streamer? This is
 * where the name "Logical Rings" starts to make sense: by virtualizing the
 * rings, the engine cs shifts to a new "ring buffer" with every context
 * switch. When you want to submit a workload to the GPU you: A) choose your
 * context, B) find its appropriate virtualized ring, C) write commands to it
 * and then, finally, D) tell the GPU to switch to that context.
 *
 * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
 * to a context is via a context execution list, ergo "Execlists".
 *
 * LRC implementation:
 * Regarding the creation of contexts, we have:
 *
 * - One global default context.
 * - One local default context for each opened fd.
 * - One local extra context for each context create ioctl call.
 *
 * Now that ringbuffers belong per-context (and not per-engine, like before)
 * and that contexts are uniquely tied to a given engine (and not reusable,
 * like before) we need:
 *
 * - One ringbuffer per-engine inside each context.
 * - One backing object per-engine inside each context.
 *
 * The global default context starts its life with these new objects fully
 * allocated and populated. The local default context for each opened fd is
 * more complex, because we don't know at creation time which engine is going
 * to use them. To handle this, we have implemented a deferred creation of LR
 * contexts:
 *
 * The local context starts its life as a hollow or blank holder, that only
 * gets populated for a given engine once we receive an execbuffer. If later
 * on we receive another execbuffer ioctl for the same context but a different
 * engine, we allocate/populate a new ringbuffer and context backing object and
 * so on.
 *
 * Finally, regarding local contexts created using the ioctl call: as they are
 * only allowed with the render ring, we can allocate & populate them right
 * away (no need to defer anything, at least for now).
 *
 * Execlists implementation:
 * Execlists are the new method by which, on gen8+ hardware, workloads are
 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
 * This method works as follows:
 *
 * When a request is committed, its commands (the BB start and any leading or
 * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
 * for the appropriate context. The tail pointer in the hardware context is not
 * updated at this time, but is instead kept by the driver in the ringbuffer
 * structure. A structure representing this request is added to a request queue
 * for the appropriate engine: this structure contains a copy of the context's
 * tail after the request was written to the ring buffer and a pointer to the
 * context itself.
 *
 * If the engine's request queue was empty before the request was added, the
 * queue is processed immediately. Otherwise the queue will be processed during
 * a context switch interrupt. In any case, elements on the queue will get sent
 * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
 * globally unique 20-bit submission ID.
 *
 * When execution of a request completes, the GPU updates the context status
 * buffer with a context complete event and generates a context switch interrupt.
 * During the interrupt handling, the driver examines the events in the buffer:
 * for each context complete event, if the announced ID matches that on the head
 * of the request queue, then that request is retired and removed from the queue.
 *
 * After processing, if any requests were retired and the queue is not empty
 * then a new execution list can be submitted. The two requests at the front of
 * the queue are next to be submitted but since a context may not occur twice in
 * an execution list, if subsequent requests have the same ID as the first then
 * the two requests must be combined. This is done simply by discarding requests
 * at the head of the queue until either only one request is left (in which case
 * we use a NULL second context) or the first two requests have unique IDs.
 *
 * By always executing the first two requests in the queue the driver ensures
 * that the GPU is kept as busy as possible. In the case where a single context
 * completes but a second context is still executing, the request for this second
 * context will be at the head of the queue when we remove the first one. This
 * request will then be resubmitted along with a new request for a different context,
 * which will cause the hardware to continue executing the second request and queue
 * the new request (the GPU detects the condition of a context getting preempted
 * with the same context and optimizes the context switch flow by not doing
 * preemption, but just sampling the new tail pointer).
 *
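 *
 * As a purely illustrative example (not from any specific trace): with
 * requests A1 and A2 from context A and B1 from context B pending, A1 and
 * A2 are coalesced into a single element whose tail points past A2, and B1
 * occupies the second element, so the pair submitted to the ELSP is
 * { A (tail after A2), B (tail after B1) }.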
 */
#include <linux/interrupt.h>

#include "gem/i915_gem_context.h"

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_lrc_reg.h"
#include "intel_mocs.h"
#include "intel_renderstate.h"
#include "intel_reset.h"
#include "intel_workarounds.h"

#define RING_EXECLIST_QFULL		(1 << 0x2)
#define RING_EXECLIST1_VALID		(1 << 0x3)
#define RING_EXECLIST0_VALID		(1 << 0x4)
#define RING_EXECLIST_ACTIVE_STATUS	(3 << 0xE)
#define RING_EXECLIST1_ACTIVE		(1 << 0x11)
#define RING_EXECLIST0_ACTIVE		(1 << 0x12)

#define GEN8_CTX_STATUS_IDLE_ACTIVE	(1 << 0)
#define GEN8_CTX_STATUS_PREEMPTED	(1 << 1)
#define GEN8_CTX_STATUS_ELEMENT_SWITCH	(1 << 2)
#define GEN8_CTX_STATUS_ACTIVE_IDLE	(1 << 3)
#define GEN8_CTX_STATUS_COMPLETE	(1 << 4)
#define GEN8_CTX_STATUS_LITE_RESTORE	(1 << 15)

#define GEN8_CTX_STATUS_COMPLETED_MASK \
	 (GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED)

#define CTX_DESC_FORCE_RESTORE BIT_ULL(2)

/* Typical size of the average request (2 pipecontrols and a MI_BB) */
#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
#define WA_TAIL_DWORDS 2
#define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)

struct virtual_engine {
	struct intel_engine_cs base;
	struct intel_context context;

	/*
	 * We allow only a single request through the virtual engine at a time
	 * (each request in the timeline waits for the completion fence of
	 * the previous before being submitted). By restricting ourselves to
	 * only submitting a single request, each request is placed on to a
	 * physical engine to maximise load spreading (by virtue of the late greedy
	 * scheduling -- each real engine takes the next available request
	 * upon idling).
	 */
	struct i915_request *request;

	/*
	 * We keep a rbtree of available virtual engines inside each physical
	 * engine, sorted by priority. Here we preallocate the nodes we need
	 * for the virtual engine, indexed by physical_engine->id.
	 */
	struct ve_node {
		struct rb_node rb;
		int prio;
	} nodes[I915_NUM_ENGINES];

	/*
	 * Keep track of bonded pairs -- restrictions upon our selection
	 * of physical engines any particular request may be submitted to.
	 * If we receive a submit-fence from a master engine, we will only
	 * use one of sibling_mask physical engines.
	 */
	struct ve_bond {
		const struct intel_engine_cs *master;
		intel_engine_mask_t sibling_mask;
	} *bonds;
	unsigned int num_bonds;

	/* And finally, which physical engines this virtual engine maps onto. */
	unsigned int num_siblings;
	struct intel_engine_cs *siblings[0];
};

static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!intel_engine_is_virtual(engine));
	return container_of(engine, struct virtual_engine, base);
}

static int execlists_context_deferred_alloc(struct intel_context *ce,
					    struct intel_engine_cs *engine);
static void execlists_init_reg_state(u32 *reg_state,
				     struct intel_context *ce,
				     struct intel_engine_cs *engine,
				     struct intel_ring *ring);

static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine)
{
	return (i915_ggtt_offset(engine->status_page.vma) +
		I915_GEM_HWS_PREEMPT_ADDR);
}

static inline void
ring_set_paused(const struct intel_engine_cs *engine, int state)
{
	/*
	 * We inspect HWS_PREEMPT with a semaphore inside
	 * engine->emit_fini_breadcrumb. If the dword is true,
	 * the ring is paused as the semaphore will busywait
	 * until the dword is false.
	 */
	engine->status_page.addr[I915_GEM_HWS_PREEMPT] = state;
	if (state)
		wmb();
}

static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
	return rb_entry(rb, struct i915_priolist, node);
}

static inline int rq_prio(const struct i915_request *rq)
{
	return rq->sched.attr.priority;
}

static int effective_prio(const struct i915_request *rq)
{
	int prio = rq_prio(rq);

	/*
	 * If this request is special and must not be interrupted at any
	 * cost, so be it. Note we are only checking the most recent request
	 * in the context and so may be masking an earlier vip request. It
	 * is hoped that under the conditions where nopreempt is used, this
	 * will not matter (i.e. all requests to that context will be
	 * nopreempt for as long as desired).
	 */
	if (i915_request_has_nopreempt(rq))
		prio = I915_PRIORITY_UNPREEMPTABLE;

	/*
	 * On unwinding the active request, we give it a priority bump
	 * if it has completed waiting on any semaphore. If we know that
	 * the request has already started, we can prevent an unwanted
	 * preempt-to-idle cycle by taking that into account now.
	 */
	if (__i915_request_has_started(rq))
		prio |= I915_PRIORITY_NOSEMAPHORE;

	/* Restrict mere WAIT boosts from triggering preemption */
	BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK); /* only internal */
	return prio | __NO_PREEMPTION;
}

static int queue_prio(const struct intel_engine_execlists *execlists)
{
	struct i915_priolist *p;
	struct rb_node *rb;

	rb = rb_first_cached(&execlists->queue);
	if (!rb)
		return INT_MIN;

	/*
	 * As the priolist[] are inverted, with the highest priority in [0],
	 * we have to flip the index value to become priority.
	 */
	p = to_priolist(rb);
	return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used);
}

static inline bool need_preempt(const struct intel_engine_cs *engine,
				const struct i915_request *rq,
				struct rb_node *rb)
{
	int last_prio;

	if (!intel_engine_has_semaphores(engine))
		return false;

	/*
	 * Check if the current priority hint merits a preemption attempt.
	 *
	 * We record the highest value priority we saw during rescheduling
	 * prior to this dequeue, therefore we know that if it is strictly
	 * less than the current tail of ELSP[0], we do not need to force
	 * a preempt-to-idle cycle.
	 *
	 * However, the priority hint is a mere hint that we may need to
	 * preempt. If that hint is stale or we may be trying to preempt
	 * ourselves, ignore the request.
	 */
	last_prio = effective_prio(rq);
	if (!i915_scheduler_need_preempt(engine->execlists.queue_priority_hint,
					 last_prio))
		return false;

	/*
	 * Check against the first request in ELSP[1], it will, thanks to the
	 * power of PI, be the highest priority of that context.
	 */
	if (!list_is_last(&rq->sched.link, &engine->active.requests) &&
	    rq_prio(list_next_entry(rq, sched.link)) > last_prio)
		return true;

	if (rb) {
		struct virtual_engine *ve =
			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
		bool preempt = false;

		if (engine == ve->siblings[0]) { /* only preempt one sibling */
			struct i915_request *next;

			rcu_read_lock();
			next = READ_ONCE(ve->request);
			if (next)
				preempt = rq_prio(next) > last_prio;
			rcu_read_unlock();
		}

		if (preempt)
			return preempt;
	}

	/*
	 * If the inflight context did not trigger the preemption, then maybe
	 * it was the set of queued requests? Pick the highest priority in
	 * the queue (the first active priolist) and see if it deserves to be
	 * running instead of ELSP[0].
	 *
	 * The highest priority request in the queue cannot be either
	 * ELSP[0] or ELSP[1] as, thanks again to PI, if it was the same
	 * context, its priority would not exceed ELSP[0] aka last_prio.
	 */
	return queue_prio(&engine->execlists) > last_prio;
}

__maybe_unused static inline bool
assert_priority_queue(const struct i915_request *prev,
		      const struct i915_request *next)
{
	/*
	 * Without preemption, the prev may refer to the still active element
	 * which we refuse to let go.
	 *
	 * Even with preemption, there are times when we think it is better not
	 * to preempt and leave an ostensibly lower priority request in flight.
	 */
	if (i915_request_is_active(prev))
		return true;

	return rq_prio(prev) >= rq_prio(next);
}

/*
 * The context descriptor encodes various attributes of a context,
 * including its GTT address and some flags. Because it's fairly
 * expensive to calculate, we'll just do it once and cache the result,
 * which remains valid until the context is unpinned.
 *
 * This is what a descriptor looks like, from LSB to MSB::
 *
 *      bits  0-11:    flags, GEN8_CTX_* (cached in ctx->desc_template)
 *      bits 12-31:    LRCA, GTT address of (the HWSP of) this context
 *      bits 32-52:    ctx ID, a globally unique tag (highest bit used by GuC)
 *      bits 53-54:    mbz, reserved for use by hardware
 *      bits 55-63:    group ID, currently unused and set to 0
 *
 * Starting from Gen11, the upper dword of the descriptor has a new format:
 *
 *      bits 32-36:    reserved
 *      bits 37-47:    SW context ID
 *      bits 48-53:    engine instance
 *      bit 54:        mbz, reserved for use by hardware
 *      bits 55-60:    SW counter
 *      bits 61-63:    engine class
 *
 * engine info, SW context ID and SW counter need to form a unique number
 * (Context ID) per lrc.
 */
static u64
lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
{
	struct i915_gem_context *ctx = ce->gem_context;
	u64 desc;

	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
	BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > (BIT(GEN11_SW_CTX_ID_WIDTH)));

	desc = ctx->desc_template;				/* bits  0-11 */
	GEM_BUG_ON(desc & GENMASK_ULL(63, 12));

	desc |= i915_ggtt_offset(ce->state) + LRC_HEADER_PAGES * PAGE_SIZE;
								/* bits 12-31 */
	GEM_BUG_ON(desc & GENMASK_ULL(63, 32));

	/*
	 * The following 32bits are copied into the OA reports (dword 2).
	 * Consider updating oa_get_render_ctx_id in i915_perf.c when changing
	 * anything below.
	 */
	if (INTEL_GEN(engine->i915) >= 11) {
		GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH));
		desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT;
								/* bits 37-47 */

		desc |= (u64)engine->instance << GEN11_ENGINE_INSTANCE_SHIFT;
								/* bits 48-53 */

		/* TODO: decide what to do with SW counter (bits 55-60) */

		desc |= (u64)engine->class << GEN11_ENGINE_CLASS_SHIFT;
								/* bits 61-63 */
	} else {
		GEM_BUG_ON(ctx->hw_id >= BIT(GEN8_CTX_ID_WIDTH));
		desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT;	/* bits 32-52 */
	}

	return desc;
}

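/*
 * Strip the WA_TAIL_BYTES of padding emitted after the request so that,
 * on resubmission, RING_TAIL points at the end of the request itself
 * rather than at the workaround NOOPs.
 */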
static void unwind_wa_tail(struct i915_request *rq)
{
	rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES);
	assert_ring_tail_valid(rq->ring, rq->tail);
}

static struct i915_request *
__unwind_incomplete_requests(struct intel_engine_cs *engine)
{
	struct i915_request *rq, *rn, *active = NULL;
	struct list_head *uninitialized_var(pl);
	int prio = I915_PRIORITY_INVALID;

	lockdep_assert_held(&engine->active.lock);

	list_for_each_entry_safe_reverse(rq, rn,
					 &engine->active.requests,
					 sched.link) {
		struct intel_engine_cs *owner;

		if (i915_request_completed(rq))
			continue; /* XXX */

		__i915_request_unsubmit(rq);
		unwind_wa_tail(rq);

		/*
		 * Push the request back into the queue for later resubmission.
		 * If this request is not native to this physical engine (i.e.
		 * it came from a virtual source), push it back onto the virtual
		 * engine so that it can be moved across onto another physical
		 * engine as load dictates.
		 */
		owner = rq->hw_context->engine;
		if (likely(owner == engine)) {
			GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
			if (rq_prio(rq) != prio) {
				prio = rq_prio(rq);
				pl = i915_sched_lookup_priolist(engine, prio);
			}
			GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));

			list_move(&rq->sched.link, pl);
			active = rq;
		} else {
			rq->engine = owner;
			owner->submit_request(rq);
			active = NULL;
		}
	}

	return active;
}

struct i915_request *
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
{
	struct intel_engine_cs *engine =
		container_of(execlists, typeof(*engine), execlists);

	return __unwind_incomplete_requests(engine);
}

static inline void
execlists_context_status_change(struct i915_request *rq, unsigned long status)
{
	/*
	 * Only used when GVT-g is enabled now. When GVT-g is disabled,
	 * The compiler should eliminate this function as dead-code.
	 */
	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
		return;

	atomic_notifier_call_chain(&rq->engine->context_status_notifier,
				   status, rq);
}

static inline struct i915_request *
execlists_schedule_in(struct i915_request *rq, int idx)
{
	struct intel_context *ce = rq->hw_context;
	int count;

	trace_i915_request_in(rq, idx);

	count = intel_context_inflight_count(ce);
	if (!count) {
		intel_context_get(ce);
		ce->inflight = rq->engine;

		execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
		intel_engine_context_in(ce->inflight);
	}

	intel_context_inflight_inc(ce);
	GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);

	return i915_request_get(rq);
}

static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
{
	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
	struct i915_request *next = READ_ONCE(ve->request);

	if (next && next->execution_mask & ~rq->execution_mask)
		tasklet_schedule(&ve->base.execlists.tasklet);
}

static inline void
execlists_schedule_out(struct i915_request *rq)
{
	struct intel_context *ce = rq->hw_context;

	GEM_BUG_ON(!intel_context_inflight_count(ce));

	trace_i915_request_out(rq);

	intel_context_inflight_dec(ce);
	if (!intel_context_inflight_count(ce)) {
		intel_engine_context_out(ce->inflight);
		execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);

		/*
		 * If this is part of a virtual engine, its next request may
		 * have been blocked waiting for access to the active context.
		 * We have to kick all the siblings again in case we need to
		 * switch (e.g. the next request is not runnable on this
		 * engine). Hopefully, we will already have submitted the next
		 * request before the tasklet runs and do not need to rebuild
		 * each virtual tree and kick everyone again.
		 */
		ce->inflight = NULL;
		if (rq->engine != ce->engine)
			kick_siblings(rq, ce);

		intel_context_put(ce);
	}

	i915_request_put(rq);
}

static u64 execlists_update_context(const struct i915_request *rq)
{
	struct intel_context *ce = rq->hw_context;
	u64 desc;

	ce->lrc_reg_state[CTX_RING_TAIL + 1] =
		intel_ring_set_tail(rq->ring, rq->tail);

	/*
	 * Make sure the context image is complete before we submit it to HW.
	 *
	 * Ostensibly, writes (including the WCB) should be flushed prior to
	 * an uncached write such as our mmio register access, the empirical
	 * evidence (esp. on Braswell) suggests that the WC write into memory
	 * may not be visible to the HW prior to the completion of the UC
	 * register write and that we may begin execution from the context
	 * before its image is complete leading to invalid PD chasing.
	 *
	 * Furthermore, Braswell, at least, wants a full mb to be sure that
	 * the writes are coherent in memory (visible to the GPU) prior to
	 * execution, and not just visible to other CPUs (as is the result of
	 * wmb).
	 */
	mb();

	desc = ce->lrc_desc;
	ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE;

	return desc;
}

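/*
 * Write one 64b context descriptor to the submission port: either into the
 * ELSQ slot for this port (when a control register is present) or into the
 * legacy ELSP, which expects the upper dword to be written before the lower.
 */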
static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port)
{
	if (execlists->ctrl_reg) {
		writel(lower_32_bits(desc), execlists->submit_reg + port * 2);
		writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1);
	} else {
		writel(upper_32_bits(desc), execlists->submit_reg);
		writel(lower_32_bits(desc), execlists->submit_reg);
	}
}

static __maybe_unused void
trace_ports(const struct intel_engine_execlists *execlists,
	    const char *msg,
	    struct i915_request * const *ports)
{
	const struct intel_engine_cs *engine =
		container_of(execlists, typeof(*engine), execlists);

	GEM_TRACE("%s: %s { %llx:%lld%s, %llx:%lld }\n",
		  engine->name, msg,
		  ports[0]->fence.context,
		  ports[0]->fence.seqno,
		  i915_request_completed(ports[0]) ? "!" :
		  i915_request_started(ports[0]) ? "*" :
		  "",
		  ports[1] ? ports[1]->fence.context : 0,
		  ports[1] ? ports[1]->fence.seqno : 0);
}

static __maybe_unused bool
assert_pending_valid(const struct intel_engine_execlists *execlists,
		     const char *msg)
{
	struct i915_request * const *port, *rq;
	struct intel_context *ce = NULL;

	trace_ports(execlists, msg, execlists->pending);

	if (execlists->pending[execlists_num_ports(execlists)])
		return false;

	for (port = execlists->pending; (rq = *port); port++) {
		if (ce == rq->hw_context)
			return false;

		ce = rq->hw_context;
		if (i915_request_completed(rq))
			continue;

		if (i915_active_is_idle(&ce->active))
			return false;

		if (!i915_vma_is_pinned(ce->state))
			return false;
	}

	return ce;
}

static void execlists_submit_ports(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists *execlists = &engine->execlists;
	unsigned int n;

	GEM_BUG_ON(!assert_pending_valid(execlists, "submit"));

	/*
	 * We can skip acquiring intel_runtime_pm_get() here as it was taken
	 * on our behalf by the request (see i915_gem_mark_busy()) and it will
	 * not be relinquished until the device is idle (see
	 * i915_gem_idle_work_handler()). As a precaution, we make sure
	 * that all ELSP are drained i.e. we have processed the CSB,
	 * before allowing ourselves to idle and calling intel_runtime_pm_put().
	 */
	GEM_BUG_ON(!intel_engine_pm_is_awake(engine));

	/*
	 * ELSQ note: the submit queue is not cleared after being submitted
	 * to the HW so we need to make sure we always clean it up. This is
	 * currently ensured by the fact that we always write the same number
	 * of elsq entries, keep this in mind before changing the loop below.
	 */
	for (n = execlists_num_ports(execlists); n--; ) {
		struct i915_request *rq = execlists->pending[n];

		write_desc(execlists,
			   rq ? execlists_update_context(rq) : 0,
			   n);
	}

	/* we need to manually load the submit queue */
	if (execlists->ctrl_reg)
		writel(EL_CTRL_LOAD, execlists->ctrl_reg);
}

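/*
 * GVT-g requires certain contexts to be submitted on their own, i.e. never
 * paired with a second context in the other ELSP port.
 */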
static bool ctx_single_port_submission(const struct intel_context *ce)
{
	return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
		i915_gem_context_force_single_submission(ce->gem_context));
}

static bool can_merge_ctx(const struct intel_context *prev,
			  const struct intel_context *next)
{
	if (prev != next)
		return false;

	if (ctx_single_port_submission(prev))
		return false;

	return true;
}

static bool can_merge_rq(const struct i915_request *prev,
			 const struct i915_request *next)
{
	GEM_BUG_ON(prev == next);
	GEM_BUG_ON(!assert_priority_queue(prev, next));

	if (!can_merge_ctx(prev->hw_context, next->hw_context))
		return false;

	return true;
}

static void virtual_update_register_offsets(u32 *regs,
					    struct intel_engine_cs *engine)
{
	u32 base = engine->mmio_base;

	/* Must match execlists_init_reg_state()! */

	regs[CTX_CONTEXT_CONTROL] =
		i915_mmio_reg_offset(RING_CONTEXT_CONTROL(base));
	regs[CTX_RING_HEAD] = i915_mmio_reg_offset(RING_HEAD(base));
	regs[CTX_RING_TAIL] = i915_mmio_reg_offset(RING_TAIL(base));
	regs[CTX_RING_BUFFER_START] = i915_mmio_reg_offset(RING_START(base));
	regs[CTX_RING_BUFFER_CONTROL] = i915_mmio_reg_offset(RING_CTL(base));

	regs[CTX_BB_HEAD_U] = i915_mmio_reg_offset(RING_BBADDR_UDW(base));
	regs[CTX_BB_HEAD_L] = i915_mmio_reg_offset(RING_BBADDR(base));
	regs[CTX_BB_STATE] = i915_mmio_reg_offset(RING_BBSTATE(base));
	regs[CTX_SECOND_BB_HEAD_U] =
		i915_mmio_reg_offset(RING_SBBADDR_UDW(base));
	regs[CTX_SECOND_BB_HEAD_L] = i915_mmio_reg_offset(RING_SBBADDR(base));
	regs[CTX_SECOND_BB_STATE] = i915_mmio_reg_offset(RING_SBBSTATE(base));

	regs[CTX_CTX_TIMESTAMP] =
		i915_mmio_reg_offset(RING_CTX_TIMESTAMP(base));
	regs[CTX_PDP3_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 3));
	regs[CTX_PDP3_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 3));
	regs[CTX_PDP2_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 2));
	regs[CTX_PDP2_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 2));
	regs[CTX_PDP1_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 1));
	regs[CTX_PDP1_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 1));
	regs[CTX_PDP0_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
	regs[CTX_PDP0_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));

	if (engine->class == RENDER_CLASS) {
		regs[CTX_RCS_INDIRECT_CTX] =
			i915_mmio_reg_offset(RING_INDIRECT_CTX(base));
		regs[CTX_RCS_INDIRECT_CTX_OFFSET] =
			i915_mmio_reg_offset(RING_INDIRECT_CTX_OFFSET(base));
		regs[CTX_BB_PER_CTX_PTR] =
			i915_mmio_reg_offset(RING_BB_PER_CTX_PTR(base));

		regs[CTX_R_PWR_CLK_STATE] =
			i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE);
	}
}

static bool virtual_matches(const struct virtual_engine *ve,
			    const struct i915_request *rq,
			    const struct intel_engine_cs *engine)
{
	const struct intel_engine_cs *inflight;

	if (!(rq->execution_mask & engine->mask)) /* We peeked too soon! */
		return false;

	/*
	 * We track when the HW has completed saving the context image
	 * (i.e. when we have seen the final CS event switching out of
	 * the context) and must not overwrite the context image before
	 * then. This restricts us to only using the active engine
	 * while the previous virtualized request is inflight (so
	 * we reuse the register offsets). This is a very small
	 * hysteresis on the greedy selection algorithm.
	 */
	inflight = intel_context_inflight(&ve->context);
	if (inflight && inflight != engine)
		return false;

	return true;
}

static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
				     struct intel_engine_cs *engine)
{
	struct intel_engine_cs *old = ve->siblings[0];

	/* All unattached (rq->engine == old) must already be completed */

	spin_lock(&old->breadcrumbs.irq_lock);
	if (!list_empty(&ve->context.signal_link)) {
		list_move_tail(&ve->context.signal_link,
			       &engine->breadcrumbs.signalers);
		intel_engine_queue_breadcrumbs(engine);
	}
	spin_unlock(&old->breadcrumbs.irq_lock);
}

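/*
 * Skip over the entries in execlists->active[] that have already completed
 * and return the request currently executing on HW, if any.
 */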
static struct i915_request *
last_active(const struct intel_engine_execlists *execlists)
{
	struct i915_request * const *last = execlists->active;

	while (*last && i915_request_completed(*last))
		last++;

	return *last;
}

static void defer_request(struct i915_request *rq, struct list_head * const pl)
{
	LIST_HEAD(list);

	/*
	 * We want to move the interrupted request to the back of
	 * the round-robin list (i.e. its priority level), but
	 * in doing so, we must then move all requests that were in
	 * flight and were waiting for the interrupted request to
	 * be run after it again.
	 */
	do {
		struct i915_dependency *p;

		GEM_BUG_ON(i915_request_is_active(rq));
		list_move_tail(&rq->sched.link, pl);

		list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
			struct i915_request *w =
				container_of(p->waiter, typeof(*w), sched);

			/* Leave semaphores spinning on the other engines */
			if (w->engine != rq->engine)
				continue;

			/* No waiter should start before its signaler */
			GEM_BUG_ON(i915_request_started(w) &&
				   !i915_request_completed(rq));

			GEM_BUG_ON(i915_request_is_active(w));
			if (list_empty(&w->sched.link))
				continue; /* Not yet submitted; unready */
882

883 884 885 886 887 888 889 890 891
			if (rq_prio(w) < rq_prio(rq))
				continue;

			GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
			list_move_tail(&w->sched.link, &list);
		}

		rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
	} while (rq);
}

static void defer_active(struct intel_engine_cs *engine)
{
	struct i915_request *rq;

	rq = __unwind_incomplete_requests(engine);
	if (!rq)
		return;

	defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq)));
}

static bool
need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
{
	int hint;

	if (!intel_engine_has_semaphores(engine))
		return false;

	if (list_is_last(&rq->sched.link, &engine->active.requests))
		return false;

	hint = max(rq_prio(list_next_entry(rq, sched.link)),
		   engine->execlists.queue_priority_hint);

	return hint >= effective_prio(rq);
}

static bool
enable_timeslice(struct intel_engine_cs *engine)
{
	struct i915_request *last = last_active(&engine->execlists);

	return last && need_timeslice(engine, last);
}

static void record_preemption(struct intel_engine_execlists *execlists)
{
	(void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
}

static void execlists_dequeue(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request **port = execlists->pending;
	struct i915_request ** const last_port = port + execlists->port_mask;
	struct i915_request *last;
	struct rb_node *rb;
	bool submit = false;

	/*
	 * Hardware submission is through 2 ports. Conceptually each port
	 * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
	 * static for a context, and unique to each, so we only execute
	 * requests belonging to a single context from each ring. RING_HEAD
	 * is maintained by the CS in the context image, it marks the place
	 * where it got up to last time, and through RING_TAIL we tell the CS
	 * where we want to execute up to this time.
	 *
	 * In this list the requests are in order of execution. Consecutive
	 * requests from the same context are adjacent in the ringbuffer. We
	 * can combine these requests into a single RING_TAIL update:
	 *
	 *              RING_HEAD...req1...req2
	 *                                    ^- RING_TAIL
	 * since to execute req2 the CS must first execute req1.
	 *
	 * Our goal then is to point each port to the end of a consecutive
	 * sequence of requests as being the most optimal (fewest wake ups
	 * and context switches) submission.
	 */

	for (rb = rb_first_cached(&execlists->virtual); rb; ) {
		struct virtual_engine *ve =
			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
		struct i915_request *rq = READ_ONCE(ve->request);

		if (!rq) { /* lazily cleanup after another engine handled rq */
			rb_erase_cached(rb, &execlists->virtual);
			RB_CLEAR_NODE(rb);
			rb = rb_first_cached(&execlists->virtual);
			continue;
		}

		if (!virtual_matches(ve, rq, engine)) {
			rb = rb_next(rb);
			continue;
		}

		break;
	}

	/*
	 * If the queue is higher priority than the last
	 * request in the currently active context, submit afresh.
	 * We will resubmit again afterwards in case we need to split
	 * the active context to interject the preemption request,
	 * i.e. we will retrigger preemption following the ack in case
	 * of trouble.
	 */
	last = last_active(execlists);
	if (last) {
		if (need_preempt(engine, last, rb)) {
			GEM_TRACE("%s: preempting last=%llx:%lld, prio=%d, hint=%d\n",
				  engine->name,
				  last->fence.context,
				  last->fence.seqno,
				  last->sched.attr.priority,
				  execlists->queue_priority_hint);
			record_preemption(execlists);

			/*
			 * Don't let the RING_HEAD advance past the breadcrumb
			 * as we unwind (and until we resubmit) so that we do
			 * not accidentally tell it to go backwards.
			 */
			ring_set_paused(engine, 1);

			/*
			 * Note that we have not stopped the GPU at this point,
			 * so we are unwinding the incomplete requests as they
			 * remain inflight and so by the time we do complete
			 * the preemption, some of the unwound requests may
			 * complete!
			 */
			__unwind_incomplete_requests(engine);

			/*
			 * If we need to return to the preempted context, we
			 * need to skip the lite-restore and force it to
			 * reload the RING_TAIL. Otherwise, the HW has a
			 * tendency to ignore us rewinding the TAIL to the
			 * end of an earlier request.
			 */
			last->hw_context->lrc_desc |= CTX_DESC_FORCE_RESTORE;
			last = NULL;
		} else if (need_timeslice(engine, last) &&
			   !timer_pending(&engine->execlists.timer)) {
			GEM_TRACE("%s: expired last=%llx:%lld, prio=%d, hint=%d\n",
				  engine->name,
				  last->fence.context,
				  last->fence.seqno,
				  last->sched.attr.priority,
				  execlists->queue_priority_hint);

			ring_set_paused(engine, 1);
			defer_active(engine);

			/*
			 * Unlike for preemption, if we rewind and continue
			 * executing the same context as previously active,
			 * the order of execution will remain the same and
			 * the tail will only advance. We do not need to
			 * force a full context restore, as a lite-restore
			 * is sufficient to resample the monotonic TAIL.
			 *
			 * If we switch to any other context, similarly we
			 * will not rewind TAIL of current context, and
			 * normal save/restore will preserve state and allow
			 * us to later continue executing the same request.
			 */
			last = NULL;
		} else {
			/*
			 * Otherwise if we already have a request pending
			 * for execution after the current one, we can
			 * just wait until the next CS event before
			 * queuing more. In either case we will force a
			 * lite-restore preemption event, but if we wait
			 * we hopefully coalesce several updates into a single
			 * submission.
			 */
			if (!list_is_last(&last->sched.link,
					  &engine->active.requests))
				return;

			/*
			 * WaIdleLiteRestore:bdw,skl
			 * Apply the wa NOOPs to prevent
			 * ring:HEAD == rq:TAIL as we resubmit the
			 * request. See gen8_emit_fini_breadcrumb() for
			 * where we prepare the padding after the
			 * end of the request.
			 */
			last->tail = last->wa_tail;
		}
	}

	while (rb) { /* XXX virtual is always taking precedence */
		struct virtual_engine *ve =
			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
		struct i915_request *rq;

		spin_lock(&ve->base.active.lock);

		rq = ve->request;
		if (unlikely(!rq)) { /* lost the race to a sibling */
			spin_unlock(&ve->base.active.lock);
			rb_erase_cached(rb, &execlists->virtual);
			RB_CLEAR_NODE(rb);
			rb = rb_first_cached(&execlists->virtual);
			continue;
		}

		GEM_BUG_ON(rq != ve->request);
		GEM_BUG_ON(rq->engine != &ve->base);
		GEM_BUG_ON(rq->hw_context != &ve->context);

		if (rq_prio(rq) >= queue_prio(execlists)) {
			if (!virtual_matches(ve, rq, engine)) {
				spin_unlock(&ve->base.active.lock);
				rb = rb_next(rb);
				continue;
			}

			if (i915_request_completed(rq)) {
				ve->request = NULL;
				ve->base.execlists.queue_priority_hint = INT_MIN;
				rb_erase_cached(rb, &execlists->virtual);
				RB_CLEAR_NODE(rb);

				rq->engine = engine;
				__i915_request_submit(rq);

				spin_unlock(&ve->base.active.lock);

				rb = rb_first_cached(&execlists->virtual);
				continue;
			}

			if (last && !can_merge_rq(last, rq)) {
				spin_unlock(&ve->base.active.lock);
				return; /* leave this for another */
			}

			GEM_TRACE("%s: virtual rq=%llx:%lld%s, new engine? %s\n",
				  engine->name,
				  rq->fence.context,
				  rq->fence.seqno,
				  i915_request_completed(rq) ? "!" :
				  i915_request_started(rq) ? "*" :
				  "",
				  yesno(engine != ve->siblings[0]));

			ve->request = NULL;
			ve->base.execlists.queue_priority_hint = INT_MIN;
			rb_erase_cached(rb, &execlists->virtual);
			RB_CLEAR_NODE(rb);

			GEM_BUG_ON(!(rq->execution_mask & engine->mask));
			rq->engine = engine;

			if (engine != ve->siblings[0]) {
				u32 *regs = ve->context.lrc_reg_state;
				unsigned int n;

				GEM_BUG_ON(READ_ONCE(ve->context.inflight));
				virtual_update_register_offsets(regs, engine);

				if (!list_empty(&ve->context.signals))
					virtual_xfer_breadcrumbs(ve, engine);

				/*
				 * Move the bound engine to the top of the list
				 * for future execution. We then kick this
				 * tasklet first before checking others, so that
				 * we preferentially reuse this set of bound
				 * registers.
				 */
				for (n = 1; n < ve->num_siblings; n++) {
					if (ve->siblings[n] == engine) {
						swap(ve->siblings[n],
						     ve->siblings[0]);
						break;
					}
				}

				GEM_BUG_ON(ve->siblings[0] != engine);
			}

			__i915_request_submit(rq);
			if (!i915_request_completed(rq)) {
				submit = true;
				last = rq;
			}
		}

		spin_unlock(&ve->base.active.lock);
		break;
	}

	while ((rb = rb_first_cached(&execlists->queue))) {
		struct i915_priolist *p = to_priolist(rb);
		struct i915_request *rq, *rn;
		int i;

		priolist_for_each_request_consume(rq, rn, p, i) {
			if (i915_request_completed(rq))
				goto skip;

			/*
			 * Can we combine this request with the current port?
			 * It has to be the same context/ringbuffer and not
			 * have any exceptions (e.g. GVT saying never to
			 * combine contexts).
			 *
			 * If we can combine the requests, we can execute both
			 * by updating the RING_TAIL to point to the end of the
			 * second request, and so we never need to tell the
			 * hardware about the first.
1204
			 */
1205
			if (last && !can_merge_rq(last, rq)) {
1206 1207 1208 1209 1210
				/*
				 * If we are on the second port and cannot
				 * combine this request with the last, then we
				 * are done.
				 */
				if (port == last_port)
					goto done;

				/*
				 * We must not populate both ELSP[] with the
				 * same LRCA, i.e. we must submit 2 different
				 * contexts if we submit 2 ELSP.
				 */
				if (last->hw_context == rq->hw_context)
					goto done;

				/*
				 * If GVT overrides us we only ever submit
				 * port[0], leaving port[1] empty. Note that we
				 * also have to be careful that we don't queue
				 * the same context (even though a different
				 * request) to the second port.
				 */
				if (ctx_single_port_submission(last->hw_context) ||
				    ctx_single_port_submission(rq->hw_context))
					goto done;

				*port = execlists_schedule_in(last, port - execlists->pending);
				port++;
			}

			last = rq;
			submit = true;
skip:
			__i915_request_submit(rq);
		}

		rb_erase_cached(&p->node, &execlists->queue);
		i915_priolist_free(p);
	}

done:
	/*
	 * Here be a bit of magic! Or sleight-of-hand, whichever you prefer.
	 *
	 * We choose the priority hint such that if we add a request of greater
	 * priority than this, we kick the submission tasklet to decide on
	 * the right order of submitting the requests to hardware. We must
	 * also be prepared to reorder requests as they are in-flight on the
	 * HW. We derive the priority hint then as the first "hole" in
	 * the HW submission ports and if there are no available slots,
	 * the priority of the lowest executing request, i.e. last.
	 *
	 * When we do receive a higher priority request ready to run from the
	 * user, see queue_request(), the priority hint is bumped to that
	 * request triggering preemption on the next dequeue (or subsequent
	 * interrupt for secondary ports).
	 */
	execlists->queue_priority_hint = queue_prio(execlists);
	GEM_TRACE("%s: queue_priority_hint:%d, submit:%s\n",
		  engine->name, execlists->queue_priority_hint,
		  yesno(submit));

	if (submit) {
		*port = execlists_schedule_in(last, port - execlists->pending);
		memset(port + 1, 0, (last_port - port) * sizeof(*port));
		execlists_submit_ports(engine);
	} else {
		ring_set_paused(engine, 0);
	}
}

void
execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
{
	struct i915_request * const *port, *rq;

	for (port = execlists->pending; (rq = *port); port++)
		execlists_schedule_out(rq);
	memset(execlists->pending, 0, sizeof(execlists->pending));

	for (port = execlists->active; (rq = *port); port++)
		execlists_schedule_out(rq);
	execlists->active =
		memset(execlists->inflight, 0, sizeof(execlists->inflight));
}

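/*
 * Evict the cachelines covering the CSB entries so that the next read
 * observes fresh values written by the HW rather than stale cached data.
 */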
static inline void
invalidate_csb_entries(const u32 *first, const u32 *last)
{
	clflush((void *)first);
	clflush((void *)last);
}

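/*
 * The submission tasklet is disabled while a reset is being prepared and
 * processed, so use that as the marker for a reset in progress.
 */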
static inline bool
reset_in_progress(const struct intel_engine_execlists *execlists)
{
	return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
}

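/*
 * csb_parse() decodes a single CSB event into the action required: a new
 * submission becoming active (promote), the inflight set being preempted,
 * the request in the first port completing, or nothing at all.
 */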
enum csb_step {
	CSB_NOP,
	CSB_PROMOTE,
	CSB_PREEMPT,
	CSB_COMPLETE,
};

static inline enum csb_step
csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
{
	unsigned int status = *csb;

	if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
		return CSB_PROMOTE;

	if (status & GEN8_CTX_STATUS_PREEMPTED)
		return CSB_PREEMPT;

	if (*execlists->active)
		return CSB_COMPLETE;

	return CSB_NOP;
}

static void process_csb(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	const u32 * const buf = execlists->csb_status;
	const u8 num_entries = execlists->csb_size;
	u8 head, tail;

	lockdep_assert_held(&engine->active.lock);
	GEM_BUG_ON(USES_GUC_SUBMISSION(engine->i915));

	/*
	 * Note that csb_write, csb_status may be either in HWSP or mmio.
	 * When reading from the csb_write mmio register, we have to be
	 * careful to only use the GEN8_CSB_WRITE_PTR portion, which is
	 * the low 4bits. As it happens we know the next 4bits are always
	 * zero and so we can simply masked off the low u8 of the register
	 * and treat it identically to reading from the HWSP (without having
	 * to use explicit shifting and masking, and probably bifurcating
	 * the code to handle the legacy mmio read).
	 */
	head = execlists->csb_head;
	tail = READ_ONCE(*execlists->csb_write);
	GEM_TRACE("%s cs-irq head=%d, tail=%d\n", engine->name, head, tail);
	if (unlikely(head == tail))
		return;

	/*
	 * Hopefully paired with a wmb() in HW!
	 *
	 * We must complete the read of the write pointer before any reads
	 * from the CSB, so that we do not see stale values. Without an rmb
	 * (lfence) the HW may speculatively perform the CSB[] reads *before*
	 * we perform the READ_ONCE(*csb_write).
	 */
	rmb();

	do {
		if (++head == num_entries)
			head = 0;

		/*
		 * We are flying near dragons again.
		 *
		 * We hold a reference to the request in execlist_port[]
		 * but no more than that. We are operating in softirq
		 * context and so cannot hold any mutex or sleep. That
		 * prevents us stopping the requests we are processing
		 * in port[] from being retired simultaneously (the
		 * breadcrumb will be complete before we see the
		 * context-switch). As we only hold the reference to the
		 * request, any pointer chasing underneath the request
		 * is subject to a potential use-after-free. Thus we
		 * store all of the bookkeeping within port[] as
		 * required, and avoid using unguarded pointers beneath
		 * request itself. The same applies to the atomic
		 * status notifier.
		 */

		GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x\n",
			  engine->name, head,
			  buf[2 * head + 0], buf[2 * head + 1]);

		switch (csb_parse(execlists, buf + 2 * head)) {
		case CSB_PREEMPT: /* cancel old inflight, prepare for switch */
			trace_ports(execlists, "preempted", execlists->active);

			while (*execlists->active)
				execlists_schedule_out(*execlists->active++);

			/* fallthrough */
		case CSB_PROMOTE: /* switch pending to inflight */
			GEM_BUG_ON(*execlists->active);
			GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
			execlists->active =
				memcpy(execlists->inflight,
				       execlists->pending,
				       execlists_num_ports(execlists) *
				       sizeof(*execlists->pending));
			execlists->pending[0] = NULL;

			trace_ports(execlists, "promoted", execlists->active);

			if (enable_timeslice(engine))
				mod_timer(&execlists->timer, jiffies + 1);

			if (!inject_preempt_hang(execlists))
				ring_set_paused(engine, 0);
			break;

		case CSB_COMPLETE: /* port0 completed, advanced to port1 */
			trace_ports(execlists, "completed", execlists->active);

			/*
			 * We rely on the hardware being strongly
			 * ordered, that the breadcrumb write is
			 * coherent (visible from the CPU) before the
			 * user interrupt and CSB is processed.
			 */
			GEM_BUG_ON(!i915_request_completed(*execlists->active) &&
				   !reset_in_progress(execlists));
			execlists_schedule_out(*execlists->active++);

			GEM_BUG_ON(execlists->active - execlists->inflight >
				   execlists_num_ports(execlists));
			break;

		case CSB_NOP:
			break;
		}
	} while (head != tail);

	execlists->csb_head = head;

	/*
	 * Gen11 has proven to fail wrt global observation point between
	 * entry and tail update, failing on the ordering and thus
	 * we see an old entry in the context status buffer.
	 *
	 * Forcibly evict out entries for the next gpu csb update,
	 * to increase the odds that we get a fresh entries with non
	 * working hardware. The cost for doing so comes out mostly with
	 * the wash as hardware, working or not, will need to do the
	 * invalidation before.
	 */
	invalidate_csb_entries(&buf[0], &buf[num_entries - 1]);
}

static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
{
	lockdep_assert_held(&engine->active.lock);

	process_csb(engine);
	if (!engine->execlists.pending[0])
		execlists_dequeue(engine);
}

/*
 * Check the unread Context Status Buffers and manage the submission of new
 * contexts to the ELSP accordingly.
 */
static void execlists_submission_tasklet(unsigned long data)
{
	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
	unsigned long flags;

	spin_lock_irqsave(&engine->active.lock, flags);
	__execlists_submission_tasklet(engine);
	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void execlists_submission_timer(struct timer_list *timer)
{
	struct intel_engine_cs *engine =
		from_timer(engine, timer, execlists.timer);

	/* Kick the tasklet for some interrupt coalescing and reset handling */
	tasklet_hi_schedule(&engine->execlists.tasklet);
}

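/*
 * Add the request to the priolist bucket matching its priority; requests
 * within a bucket are consumed in FIFO order by the dequeue.
 */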
static void queue_request(struct intel_engine_cs *engine,
			  struct i915_sched_node *node,
			  int prio)
{
	GEM_BUG_ON(!list_empty(&node->link));
	list_add_tail(&node->link, i915_sched_lookup_priolist(engine, prio));
}

static void __submit_queue_imm(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	if (reset_in_progress(execlists))
		return; /* defer until we restart the engine following reset */

	if (execlists->tasklet.func == execlists_submission_tasklet)
		__execlists_submission_tasklet(engine);
	else
		tasklet_hi_schedule(&execlists->tasklet);
}

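/*
 * Kick the submission tasklet (or dequeue directly) only if this request
 * is of higher priority than anything already queued, as recorded in the
 * queue_priority_hint.
 */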
static void submit_queue(struct intel_engine_cs *engine,
			 const struct i915_request *rq)
{
	struct intel_engine_execlists *execlists = &engine->execlists;

	if (rq_prio(rq) <= execlists->queue_priority_hint)
		return;

	execlists->queue_priority_hint = rq_prio(rq);
	__submit_queue_imm(engine);
}

static void execlists_submit_request(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->active.lock, flags);

	queue_request(engine, &request->sched, rq_prio(request));

	GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
	GEM_BUG_ON(list_empty(&request->sched.link));

	submit_queue(engine, request);

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void __execlists_context_fini(struct intel_context *ce)
{
	intel_ring_put(ce->ring);
	i915_vma_put(ce->state);
}

static void execlists_context_destroy(struct kref *kref)
{
	struct intel_context *ce = container_of(kref, typeof(*ce), ref);

	GEM_BUG_ON(!i915_active_is_idle(&ce->active));
	GEM_BUG_ON(intel_context_is_pinned(ce));

	if (ce->state)
		__execlists_context_fini(ce);

	intel_context_free(ce);
}

static void execlists_context_unpin(struct intel_context *ce)
{
	i915_gem_context_unpin_hw_id(ce->gem_context);
	i915_gem_object_unpin_map(ce->state->obj);
}

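/*
 * Refresh the ring registers (start, head, tail) stored in the context
 * image, plus the RPCS power-clock state for render engines, after the
 * ring has been (re)positioned.
 */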
static void
__execlists_update_reg_state(struct intel_context *ce,
			     struct intel_engine_cs *engine)
{
	struct intel_ring *ring = ce->ring;
	u32 *regs = ce->lrc_reg_state;

	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
1574 1575 1576 1577 1578 1579

	regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(ring->vma);
	regs[CTX_RING_HEAD + 1] = ring->head;
	regs[CTX_RING_TAIL + 1] = ring->tail;

	/* RPCS */
	if (engine->class == RENDER_CLASS) {
		regs[CTX_R_PWR_CLK_STATE + 1] =
			intel_sseu_make_rpcs(engine->i915, &ce->sseu);

		i915_oa_init_reg_state(engine, ce, regs);
	}
}

static int
__execlists_context_pin(struct intel_context *ce,
			struct intel_engine_cs *engine)
{
	void *vaddr;
	int ret;

	GEM_BUG_ON(!ce->gem_context->vm);

	ret = execlists_context_deferred_alloc(ce, engine);
	if (ret)
		goto err;
	GEM_BUG_ON(!ce->state);

	ret = intel_context_active_acquire(ce);
	if (ret)
		goto err;
	GEM_BUG_ON(!i915_vma_is_pinned(ce->state));

	vaddr = i915_gem_object_pin_map(ce->state->obj,
					i915_coherent_map_type(engine->i915) |
					I915_MAP_OVERRIDE);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto unpin_active;
	}

	ret = i915_gem_context_pin_hw_id(ce->gem_context);
	if (ret)
		goto unpin_map;

	ce->lrc_desc = lrc_descriptor(ce, engine);
	ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
	__execlists_update_reg_state(ce, engine);

	return 0;

unpin_map:
	i915_gem_object_unpin_map(ce->state->obj);
unpin_active:
	intel_context_active_release(ce);
err:
	return ret;
}

static int execlists_context_pin(struct intel_context *ce)
{
	return __execlists_context_pin(ce, ce->engine);
}

static void execlists_context_reset(struct intel_context *ce)
{
	/*
	 * Because we emit WA_TAIL_DWORDS there may be a disparity
	 * between our bookkeeping in ce->ring->head and ce->ring->tail and
	 * that stored in context. As we only write new commands from
	 * ce->ring->tail onwards, everything before that is junk. If the GPU
	 * starts reading from its RING_HEAD from the context, it may try to
	 * execute that junk and die.
	 *
	 * The contexts that are still pinned on resume belong to the
	 * kernel, and are local to each engine. All other contexts will
	 * have their head/tail sanitized upon pinning before use, so they
	 * will never see garbage.
	 *
	 * So to avoid that we reset the context images upon resume. For
	 * simplicity, we just zero everything out.
	 */
	intel_ring_reset(ce->ring, 0);
	__execlists_update_reg_state(ce, ce->engine);
}

static const struct intel_context_ops execlists_context_ops = {
	.pin = execlists_context_pin,
	.unpin = execlists_context_unpin,

	.enter = intel_context_enter_engine,
	.exit = intel_context_exit_engine,

	.reset = execlists_context_reset,
	.destroy = execlists_context_destroy,
};

static int gen8_emit_init_breadcrumb(struct i915_request *rq)
{
	u32 *cs;

	GEM_BUG_ON(!rq->timeline->has_initial_breadcrumb);

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * Check if we have been preempted before we even get started.
	 *
	 * After this point i915_request_started() reports true, even if
	 * we get preempted and so are no longer running.
	 */
	*cs++ = MI_ARB_CHECK;
	*cs++ = MI_NOOP;

	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
	*cs++ = rq->timeline->hwsp_offset;
	*cs++ = 0;
	*cs++ = rq->fence.seqno - 1;

	intel_ring_advance(rq, cs);

	/* Record the updated position of the request's payload */
	rq->infix = intel_ring_offset(rq, cs);

	return 0;
}

static int emit_pdps(struct i915_request *rq)
{
	const struct intel_engine_cs * const engine = rq->engine;
	struct i915_ppgtt * const ppgtt =
		i915_vm_to_ppgtt(rq->gem_context->vm);
	int err, i;
	u32 *cs;

	GEM_BUG_ON(intel_vgpu_active(rq->i915));

	/*
	 * Beware ye of the dragons, this sequence is magic!
	 *
	 * Small changes to this sequence can cause anything from
	 * GPU hangs to forcewake errors and machine lockups!
	 */

	/* Flush any residual operations from the context load */
	err = engine->emit_flush(rq, EMIT_FLUSH);
	if (err)
		return err;

	/* Magic required to prevent forcewake errors! */
	err = engine->emit_flush(rq, EMIT_INVALIDATE);
	if (err)
		return err;

	cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Ensure the LRI have landed before we invalidate & continue */
	*cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
	for (i = GEN8_3LVL_PDPES; i--; ) {
		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
		u32 base = engine->mmio_base;

		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
		*cs++ = upper_32_bits(pd_daddr);
		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
		*cs++ = lower_32_bits(pd_daddr);
	}
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	/* Be doubly sure the LRI have landed before proceeding */
	err = engine->emit_flush(rq, EMIT_FLUSH);
	if (err)
		return err;

	/* Re-invalidate the TLB for luck */
	return engine->emit_flush(rq, EMIT_INVALIDATE);
}

static int execlists_request_alloc(struct i915_request *request)
{
	int ret;

	GEM_BUG_ON(!intel_context_is_pinned(request->hw_context));

	/*
	 * Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += EXECLISTS_REQUEST_SIZE;

	/*
	 * Note that after this point, we have committed to using
	 * this request as it is being used to both track the
	 * state of engine initialisation and liveness of the
	 * golden renderstate above. Think twice before you try
	 * to cancel/unwind this request now.
	 */

	/* Unconditionally invalidate GPU caches and TLBs. */
	if (i915_vm_is_4lvl(request->gem_context->vm))
		ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
	else
		ret = emit_pdps(request);
	if (ret)
		return ret;

	request->reserved_space -= EXECLISTS_REQUEST_SIZE;
	return 0;
}

/*
 * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after
 * PIPE_CONTROL instruction. This is required for the flush to happen correctly
 * but there is a slight complication as this is applied in WA batch where the
 * values are only initialized once so we cannot take register value at the
 * beginning and reuse it further; hence we save its value to memory, upload a
 * constant value with bit21 set and then we restore it back with the saved value.
 * To simplify the WA, a constant value is formed by using the default value
 * of this register. This shouldn't be a problem because we are only modifying
 * it for a short period and this batch is non-preemptible. We can of course
 * use additional instructions that read the actual value of the register
 * at that time and set our bit of interest but it makes the WA complicated.
 *
 * This WA is also required for Gen9 so extracting as a function avoids
 * code duplication.
 */
static u32 *
gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
{
	/* NB no one else is allowed to scribble over scratch + 256! */
	*batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
	*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
	*batch++ = intel_gt_scratch_offset(engine->gt,
					   INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA);
	*batch++ = 0;

	*batch++ = MI_LOAD_REGISTER_IMM(1);
	*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
	*batch++ = 0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES;

	batch = gen8_emit_pipe_control(batch,
				       PIPE_CONTROL_CS_STALL |
				       PIPE_CONTROL_DC_FLUSH_ENABLE,
				       0);

	*batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
	*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
	*batch++ = intel_gt_scratch_offset(engine->gt,
					   INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA);
	*batch++ = 0;

	return batch;
}

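/* GGTT scratch offset written by the WaClearSlmSpaceAtContextSwitch batch. */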
static u32 slm_offset(struct intel_engine_cs *engine)
{
	return intel_gt_scratch_offset(engine->gt,
				       INTEL_GT_SCRATCH_FIELD_CLEAR_SLM_WA);
}

/*
 * Typically we only have one indirect_ctx and per_ctx batch buffer which are
 * initialized at the beginning and shared across all contexts but this field
 * helps us to have multiple batches at different offsets and select them based
 * on a criteria. At the moment this batch always start at the beginning of the page
 * and at this point we don't have multiple wa_ctx batch buffers.
1847
 *
1848 1849
 * The number of WA applied are not known at the beginning; we use this field
 * to return the no of DWORDS written.
1850
 *
1851 1852 1853 1854
 * It is to be noted that this batch does not contain MI_BATCH_BUFFER_END
 * so it adds NOOPs as padding to make it cacheline aligned.
 * MI_BATCH_BUFFER_END will be added to perctx batch and both of them together
 * makes a complete batch buffer.
1855
 */
static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
{
	/* WaDisableCtxRestoreArbitration:bdw,chv */
	*batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;

	/* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
	if (IS_BROADWELL(engine->i915))
		batch = gen8_emit_flush_coherentl3_wa(engine, batch);

	/* WaClearSlmSpaceAtContextSwitch:bdw,chv */
	/* Actual scratch location is at 128 bytes offset */
	batch = gen8_emit_pipe_control(batch,
				       PIPE_CONTROL_FLUSH_L3 |
				       PIPE_CONTROL_GLOBAL_GTT_IVB |
				       PIPE_CONTROL_CS_STALL |
				       PIPE_CONTROL_QW_WRITE,
				       slm_offset(engine));

	*batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;

	/* Pad to end of cacheline */
	while ((unsigned long)batch % CACHELINE_BYTES)
		*batch++ = MI_NOOP;

	/*
	 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
	 * execution depends on the length specified in terms of cache lines
	 * in the register CTX_RCS_INDIRECT_CTX
	 */

	return batch;
}

struct lri {
	i915_reg_t reg;
	u32 value;
};

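/* Emit a single MI_LOAD_REGISTER_IMM with @count (register, value) pairs. */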
static u32 *emit_lri(u32 *batch, const struct lri *lri, unsigned int count)
{
	GEM_BUG_ON(!count || count > 63);

	*batch++ = MI_LOAD_REGISTER_IMM(count);
	do {
		*batch++ = i915_mmio_reg_offset(lri->reg);
		*batch++ = lri->value;
	} while (lri++, --count);
	*batch++ = MI_NOOP;

	return batch;
}

static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
{
	static const struct lri lri[] = {
		/* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
		{
			COMMON_SLICE_CHICKEN2,
			__MASKED_FIELD(GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE,
				       0),
		},

		/* BSpec: 11391 */
		{
			FF_SLICE_CHICKEN,
			__MASKED_FIELD(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX,
				       FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX),
		},

		/* BSpec: 11299 */
		{
			_3D_CHICKEN3,
			__MASKED_FIELD(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX,
				       _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX),
		}
	};

	*batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;

	/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
	batch = gen8_emit_flush_coherentl3_wa(engine, batch);

	batch = emit_lri(batch, lri, ARRAY_SIZE(lri));

	/* WaMediaPoolStateCmdInWABB:bxt,glk */
	if (HAS_POOLED_EU(engine->i915)) {
		/*
		 * EU pool configuration is set up along with golden context
		 * during context initialization. This value depends on
		 * device type (2x6 or 3x6) and needs to be updated based
		 * on which subslice is disabled especially for 2x6
		 * devices, however it is safe to load default
		 * configuration of 3x6 device instead of masking off
		 * corresponding bits because HW ignores bits of a disabled
		 * subslice and drops down to appropriate config. Please
		 * see render_state_setup() in i915_gem_render_state.c for
		 * possible configurations, to avoid duplication they are
		 * not shown here again.
		 */
		*batch++ = GEN9_MEDIA_POOL_STATE;
		*batch++ = GEN9_MEDIA_POOL_ENABLE;
		*batch++ = 0x00777000;
		*batch++ = 0;
		*batch++ = 0;
		*batch++ = 0;
	}

	*batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;

	/* Pad to end of cacheline */
	while ((unsigned long)batch % CACHELINE_BYTES)
		*batch++ = MI_NOOP;

	return batch;
}

static u32 *
gen10_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
{
	int i;

	/*
	 * WaPipeControlBefore3DStateSamplePattern: cnl
	 *
	 * Ensure the engine is idle prior to programming a
	 * 3DSTATE_SAMPLE_PATTERN during a context restore.
	 */
	batch = gen8_emit_pipe_control(batch,
				       PIPE_CONTROL_CS_STALL,
				       0);
	/*
	 * WaPipeControlBefore3DStateSamplePattern says we need 4 dwords for
	 * the PIPE_CONTROL followed by 12 dwords of 0x0, so 16 dwords in
	 * total. However, a PIPE_CONTROL is 6 dwords long, not 4, which is
	 * confusing. Since gen8_emit_pipe_control() already advances the
	 * batch by 6 dwords, we advance the other 10 here, completing a
	 * cacheline. It's not clear if the workaround requires this padding
	 * before other commands, or if it's just the regular padding we would
	 * already have for the workaround bb, so leave it here for now.
	 */
	for (i = 0; i < 10; i++)
		*batch++ = MI_NOOP;

	/* Pad to end of cacheline */
	while ((unsigned long)batch % CACHELINE_BYTES)
		*batch++ = MI_NOOP;

	return batch;
}

#define CTX_WA_BB_OBJ_SIZE (PAGE_SIZE)

static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_shmem(engine->i915, CTX_WA_BB_OBJ_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (err)
		goto err;

	engine->wa_ctx.vma = vma;
	return 0;

err:
	i915_gem_object_put(obj);
	return err;
}

static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine)
{
	i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
}

typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch);

static int intel_init_workaround_bb(struct intel_engine_cs *engine)
{
	struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
	struct i915_wa_ctx_bb *wa_bb[2] = { &wa_ctx->indirect_ctx,
					    &wa_ctx->per_ctx };
	wa_bb_func_t wa_bb_fn[2];
	struct page *page;
	void *batch, *batch_ptr;
	unsigned int i;
	int ret;

	if (engine->class != RENDER_CLASS)
		return 0;

	switch (INTEL_GEN(engine->i915)) {
	case 11:
		return 0;
	case 10:
		wa_bb_fn[0] = gen10_init_indirectctx_bb;
		wa_bb_fn[1] = NULL;
		break;
	case 9:
		wa_bb_fn[0] = gen9_init_indirectctx_bb;
		wa_bb_fn[1] = NULL;
		break;
	case 8:
		wa_bb_fn[0] = gen8_init_indirectctx_bb;
		wa_bb_fn[1] = NULL;
		break;
	default:
		MISSING_CASE(INTEL_GEN(engine->i915));
		return 0;
	}

	ret = lrc_setup_wa_ctx(engine);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
		return ret;
	}

	page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0);
	batch = batch_ptr = kmap_atomic(page);

	/*
	 * Emit the two workaround batch buffers, recording the offset from the
	 * start of the workaround batch buffer object for each and their
	 * respective sizes.
	 */
	for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) {
		wa_bb[i]->offset = batch_ptr - batch;
		if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
						  CACHELINE_BYTES))) {
			ret = -EINVAL;
			break;
		}
		if (wa_bb_fn[i])
			batch_ptr = wa_bb_fn[i](engine, batch_ptr);
		wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset);
	}

	BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE);

	kunmap_atomic(batch);
	if (ret)
		lrc_destroy_wa_ctx(engine);

	return ret;
}

static void enable_execlists(struct intel_engine_cs *engine)
{
	u32 mode;

	assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);

	intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */

	if (INTEL_GEN(engine->i915) >= 11)
		mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE);
	else
		mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE);
	ENGINE_WRITE_FW(engine, RING_MODE_GEN7, mode);

	ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));

	ENGINE_WRITE_FW(engine,
			RING_HWS_PGA,
			i915_ggtt_offset(engine->status_page.vma));
	ENGINE_POSTING_READ(engine, RING_HWS_PGA);
}

static bool unexpected_starting_state(struct intel_engine_cs *engine)
{
	bool unexpected = false;

	if (ENGINE_READ_FW(engine, RING_MI_MODE) & STOP_RING) {
		DRM_DEBUG_DRIVER("STOP_RING still set in RING_MI_MODE\n");
		unexpected = true;
	}

	return unexpected;
}

static int execlists_resume(struct intel_engine_cs *engine)
{
	intel_engine_apply_workarounds(engine);
	intel_engine_apply_whitelist(engine);

	intel_mocs_init_engine(engine);

	intel_engine_reset_breadcrumbs(engine);

	if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) {
		struct drm_printer p = drm_debug_printer(__func__);

		intel_engine_dump(engine, &p, NULL);
	}

	enable_execlists(engine);

	return 0;
}

static void execlists_reset_prepare(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	unsigned long flags;

	GEM_TRACE("%s: depth<-%d\n", engine->name,
		  atomic_read(&execlists->tasklet.count));

	/*
	 * Prevent request submission to the hardware until we have
	 * completed the reset in i915_gem_reset_finish(). If a request
	 * is completed by one engine, it may then queue a request
	 * to a second via its execlists->tasklet *just* as we are
	 * calling engine->resume() and also writing the ELSP.
	 * Turning off the execlists->tasklet until the reset is over
	 * prevents the race.
	 */
	__tasklet_disable_sync_once(&execlists->tasklet);
	GEM_BUG_ON(!reset_in_progress(execlists));

	intel_engine_stop_cs(engine);

	/* And flush any current direct submission. */
	spin_lock_irqsave(&engine->active.lock, flags);
	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void reset_csb_pointers(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	const unsigned int reset_value = execlists->csb_size - 1;

	ring_set_paused(engine, 0);

	/*
	 * After a reset, the HW starts writing into CSB entry [0]. We
	 * therefore have to set our HEAD pointer back one entry so that
	 * the *first* entry we check is entry 0. To complicate this further,
	 * as we don't wait for the first interrupt after reset, we have to
	 * fake the HW write to point back to the last entry so that our
	 * inline comparison of our cached head position against the last HW
	 * write works even before the first interrupt.
	 */
	execlists->csb_head = reset_value;
	WRITE_ONCE(*execlists->csb_write, reset_value);
	wmb(); /* Make sure this is visible to HW (paranoia?) */

	invalidate_csb_entries(&execlists->csb_status[0],
			       &execlists->csb_status[reset_value]);
}

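/*
 * Walk back from @rq through the engine's active list to find the oldest
 * request of the same context that has not yet completed, i.e. the request
 * that was actually executing on the hardware when the reset struck.
 */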
static struct i915_request *active_request(struct i915_request *rq)
{
	const struct list_head * const list = &rq->engine->active.requests;
	const struct intel_context * const context = rq->hw_context;
	struct i915_request *active = NULL;

	list_for_each_entry_from_reverse(rq, list, sched.link) {
		if (i915_request_completed(rq))
			break;

		if (rq->hw_context != context)
			break;

		active = rq;
	}

	return active;
}

static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct intel_context *ce;
	struct i915_request *rq;
	u32 *regs;

	process_csb(engine); /* drain preemption events */

	/* Following the reset, we need to reload the CSB read/write pointers */
	reset_csb_pointers(engine);

	/*
	 * Save the currently executing context, even if we completed
	 * its request, it was still running at the time of the
	 * reset and will have been clobbered.
	 */
	rq = execlists_active(execlists);
	if (!rq)
		goto unwind;

	ce = rq->hw_context;
	GEM_BUG_ON(i915_active_is_idle(&ce->active));
	GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
	rq = active_request(rq);

	/*
	 * Catch up with any missed context-switch interrupts.
	 *
	 * Ideally we would just read the remaining CSB entries now that we
	 * know the gpu is idle. However, the CSB registers are sometimes^W
	 * often trashed across a GPU reset! Instead we have to rely on
	 * guessing the missed context-switch events by looking at what
	 * requests were completed.
	 */
	execlists_cancel_port_requests(execlists);

	if (!rq) {
		ce->ring->head = ce->ring->tail;
		goto out_replay;
	}

	ce->ring->head = intel_ring_wrap(ce->ring, rq->head);

	/*
	 * If this request hasn't started yet, e.g. it is waiting on a
	 * semaphore, we need to avoid skipping the request or else we
	 * break the signaling chain. However, if the context is corrupt
	 * the request will not restart and we will be stuck with a wedged
	 * device. It is quite often the case that if we issue a reset
	 * while the GPU is loading the context image, that the context
	 * image becomes corrupt.
	 *
	 * Otherwise, if we have not started yet, the request should replay
	 * perfectly and we do not need to flag the result as being erroneous.
	 */
	if (!i915_request_started(rq))
		goto out_replay;

	/*
	 * If the request was innocent, we leave the request in the ELSP
	 * and will try to replay it on restarting. The context image may
	 * have been corrupted by the reset, in which case we may have
	 * to service a new GPU hang, but more likely we can continue on
	 * without impact.
	 *
	 * If the request was guilty, we presume the context is corrupt
	 * and have to at least restore the RING register in the context
	 * image back to the expected values to skip over the guilty request.
	 */
	__i915_request_reset(rq, stalled);
	if (!stalled)
		goto out_replay;

	/*
	 * We want a simple context + ring to execute the breadcrumb update.
	 * We cannot rely on the context being intact across the GPU hang,
	 * so clear it and rebuild just what we need for the breadcrumb.
	 * All pending requests for this context will be zapped, and any
	 * future request will be after userspace has had the opportunity
	 * to recreate its own state.
	 */
	regs = ce->lrc_reg_state;
	if (engine->pinned_default_state) {
		memcpy(regs, /* skip restoring the vanilla PPHWSP */
		       engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
		       engine->context_size - PAGE_SIZE);
	}
	execlists_init_reg_state(regs, ce, engine, ce->ring);

out_replay:
	GEM_TRACE("%s replay {head:%04x, tail:%04x\n",
		  engine->name, ce->ring->head, ce->ring->tail);
	intel_ring_update_space(ce->ring);
	__execlists_update_reg_state(ce, engine);

unwind:
	/* Push back any incomplete requests for replay after the reset. */
	__unwind_incomplete_requests(engine);
}

static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
{
	unsigned long flags;

	GEM_TRACE("%s\n", engine->name);

	spin_lock_irqsave(&engine->active.lock, flags);

	__execlists_reset(engine, stalled);

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void nop_submission_tasklet(unsigned long data)
{
	/* The driver is wedged; don't process any more events. */
}

static void execlists_cancel_requests(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request *rq, *rn;
	struct rb_node *rb;
	unsigned long flags;

	GEM_TRACE("%s\n", engine->name);

	/*
	 * Before we call engine->cancel_requests(), we should have exclusive
	 * access to the submission state. This is arranged for us by the
	 * caller disabling the interrupt generation, the tasklet and other
	 * threads that may then access the same state, giving us a free hand
	 * to reset state. However, we still need to let lockdep be aware that
	 * we know this state may be accessed in hardirq context, so we
	 * disable the irq around this manipulation and we want to keep
	 * the spinlock focused on its duties and not accidentally conflate
	 * coverage to the submission's irq state. (Similarly, although we
	 * shouldn't need to disable irq around the manipulation of the
	 * submission's irq state, we also wish to remind ourselves that
	 * it is irq state.)
	 */
	spin_lock_irqsave(&engine->active.lock, flags);

	__execlists_reset(engine, true);

	/* Mark all executing requests as skipped. */
	list_for_each_entry(rq, &engine->active.requests, sched.link) {
		if (!i915_request_signaled(rq))
			dma_fence_set_error(&rq->fence, -EIO);

		i915_request_mark_complete(rq);
	}

	/* Flush the queued requests to the timeline list (for retiring). */
	while ((rb = rb_first_cached(&execlists->queue))) {
		struct i915_priolist *p = to_priolist(rb);
		int i;

		priolist_for_each_request_consume(rq, rn, p, i) {
			list_del_init(&rq->sched.link);
			__i915_request_submit(rq);
			dma_fence_set_error(&rq->fence, -EIO);
			i915_request_mark_complete(rq);
		}

		rb_erase_cached(&p->node, &execlists->queue);
		i915_priolist_free(p);
	}

	/* Cancel all attached virtual engines */
	while ((rb = rb_first_cached(&execlists->virtual))) {
		struct virtual_engine *ve =
			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);

		rb_erase_cached(rb, &execlists->virtual);
		RB_CLEAR_NODE(rb);

		spin_lock(&ve->base.active.lock);
		if (ve->request) {
			ve->request->engine = engine;
			__i915_request_submit(ve->request);
			dma_fence_set_error(&ve->request->fence, -EIO);
			i915_request_mark_complete(ve->request);
			ve->base.execlists.queue_priority_hint = INT_MIN;
			ve->request = NULL;
		}
		spin_unlock(&ve->base.active.lock);
	}

	/* Remaining _unready_ requests will be nop'ed when submitted */

	execlists->queue_priority_hint = INT_MIN;
	execlists->queue = RB_ROOT_CACHED;

	GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
	execlists->tasklet.func = nop_submission_tasklet;

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void execlists_reset_finish(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	/*
	 * After a GPU reset, we may have requests to replay. Do so now while
	 * we still have the forcewake to be sure that the GPU is not allowed
	 * to sleep before we restart and reload a context.
	 */
	GEM_BUG_ON(!reset_in_progress(execlists));
	if (!RB_EMPTY_ROOT(&execlists->queue.rb_root))
		execlists->tasklet.func(execlists->tasklet.data);

	if (__tasklet_enable(&execlists->tasklet))
		/* And kick in case we missed a new request submission. */
		tasklet_hi_schedule(&execlists->tasklet);
	GEM_TRACE("%s: depth->%d\n", engine->name,
		  atomic_read(&execlists->tasklet.count));
}

static int gen8_emit_bb_start(struct i915_request *rq,
			      u64 offset, u32 len,
			      const unsigned int flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * WaDisableCtxRestoreArbitration:bdw,chv
	 *
	 * We don't need to perform MI_ARB_ENABLE as often as we do (in
	 * particular all the gen that do not need the w/a at all!), if we
	 * took care to make sure that on every switch into this context
	 * (both ordinary and for preemption) that arbitration was enabled
	 * we would be fine.  However, for gen8 there is another w/a that
	 * requires us to not preempt inside GPGPU execution, so we keep
	 * arbitration disabled for gen8 batches. Arbitration will be
	 * re-enabled before we close the request
	 * (engine->emit_fini_breadcrumb).
	 */
	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;

	/* FIXME(BDW+): Address space and security selectors. */
	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);

	intel_ring_advance(rq, cs);

	return 0;
}

static int gen9_emit_bb_start(struct i915_request *rq,
			      u64 offset, u32 len,
			      const unsigned int flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;

	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	return 0;
}

static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
{
	ENGINE_WRITE(engine, RING_IMR,
		     ~(engine->irq_enable_mask | engine->irq_keep_mask));
	ENGINE_POSTING_READ(engine, RING_IMR);
}

static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
{
	ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
}

static int gen8_emit_flush(struct i915_request *request, u32 mode)
{
	u32 cmd, *cs;

	cs = intel_ring_begin(request, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	cmd = MI_FLUSH_DW + 1;

	/* We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	if (mode & EMIT_INVALIDATE) {
		cmd |= MI_INVALIDATE_TLB;
		if (request->engine->class == VIDEO_DECODE_CLASS)
			cmd |= MI_INVALIDATE_BSD;
	}

	*cs++ = cmd;
	*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
	*cs++ = 0; /* upper addr */
	*cs++ = 0; /* value */
	intel_ring_advance(request, cs);

	return 0;
}

static int gen8_emit_flush_render(struct i915_request *request,
				  u32 mode)
{
	struct intel_engine_cs *engine = request->engine;
	u32 scratch_addr =
		intel_gt_scratch_offset(engine->gt,
					INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
	bool vf_flush_wa = false, dc_flush_wa = false;
	u32 *cs, flags = 0;
	int len;

	flags |= PIPE_CONTROL_CS_STALL;

	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}

	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		/*
		 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
		 * pipe control.
		 */
		if (IS_GEN(request->i915, 9))
			vf_flush_wa = true;

		/* WaForGAMHang:kbl */
		if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0))
			dc_flush_wa = true;
	}

	len = 6;

	if (vf_flush_wa)
		len += 6;

	if (dc_flush_wa)
		len += 12;

	cs = intel_ring_begin(request, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	if (vf_flush_wa)
		cs = gen8_emit_pipe_control(cs, 0, 0);

	if (dc_flush_wa)
		cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE,
					    0);

	cs = gen8_emit_pipe_control(cs, flags, scratch_addr);

	if (dc_flush_wa)
		cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0);

	intel_ring_advance(request, cs);

	return 0;
}

/*
 * Reserve space for 2 NOOPs at the end of each request to be
 * used as a workaround for not being allowed to do lite
 * restore with HEAD==TAIL (WaIdleLiteRestore).
 */
static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs)
{
	/* Ensure there's always at least one preemption point per-request. */
	*cs++ = MI_ARB_CHECK;
	*cs++ = MI_NOOP;
	request->wa_tail = intel_ring_offset(request, cs);

	return cs;
}

static u32 *emit_preempt_busywait(struct i915_request *request, u32 *cs)
{
	*cs++ = MI_SEMAPHORE_WAIT |
		MI_SEMAPHORE_GLOBAL_GTT |
		MI_SEMAPHORE_POLL |
		MI_SEMAPHORE_SAD_EQ_SDD;
	*cs++ = 0;
	*cs++ = intel_hws_preempt_address(request->engine);
	*cs++ = 0;

	return cs;
}

static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
{
	cs = gen8_emit_ggtt_write(cs,
				  request->fence.seqno,
				  request->timeline->hwsp_offset,
				  0);
	*cs++ = MI_USER_INTERRUPT;

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	if (intel_engine_has_semaphores(request->engine))
		cs = emit_preempt_busywait(request, cs);

	request->tail = intel_ring_offset(request, cs);
	assert_ring_tail_valid(request->ring, request->tail);

	return gen8_emit_wa_tail(request, cs);
}

static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
{
	/* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
	cs = gen8_emit_ggtt_write_rcs(cs,
				      request->fence.seqno,
				      request->timeline->hwsp_offset,
				      PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
				      PIPE_CONTROL_DEPTH_CACHE_FLUSH |
				      PIPE_CONTROL_DC_FLUSH_ENABLE);
	cs = gen8_emit_pipe_control(cs,
				    PIPE_CONTROL_FLUSH_ENABLE |
				    PIPE_CONTROL_CS_STALL,
				    0);
	*cs++ = MI_USER_INTERRUPT;

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	if (intel_engine_has_semaphores(request->engine))
		cs = emit_preempt_busywait(request, cs);

	request->tail = intel_ring_offset(request, cs);
	assert_ring_tail_valid(request->ring, request->tail);

	return gen8_emit_wa_tail(request, cs);
}

static int gen8_init_rcs_context(struct i915_request *rq)
{
	int ret;

	ret = intel_engine_emit_ctx_wa(rq);
	if (ret)
		return ret;

	ret = intel_rcs_context_init_mocs(rq);
	/*
	 * Failing to program the MOCS is non-fatal. The system will not
	 * run at peak performance. So generate an error and carry on.
	 */
	if (ret)
		DRM_ERROR("MOCS failed to program: expect performance issues.\n");

	return intel_renderstate_emit(rq);
}

static void execlists_park(struct intel_engine_cs *engine)
{
	del_timer_sync(&engine->execlists.timer);
	intel_engine_park(engine);
}

void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = execlists_submit_request;
	engine->cancel_requests = execlists_cancel_requests;
	engine->schedule = i915_schedule;
	engine->execlists.tasklet.func = execlists_submission_tasklet;

	engine->reset.prepare = execlists_reset_prepare;
	engine->reset.reset = execlists_reset;
	engine->reset.finish = execlists_reset_finish;

	engine->park = execlists_park;
	engine->unpark = NULL;

	engine->flags |= I915_ENGINE_SUPPORTS_STATS;
	if (!intel_vgpu_active(engine->i915)) {
		engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
		if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
			engine->flags |= I915_ENGINE_HAS_PREEMPTION;
	}
}

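/*
 * Engine teardown: release the common engine state and the per-engine
 * workaround batch buffer before freeing the engine itself.
 */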
static void execlists_destroy(struct intel_engine_cs *engine)
{
	intel_engine_cleanup_common(engine);
	lrc_destroy_wa_ctx(engine);
	kfree(engine);
}

static void
logical_ring_default_vfuncs(struct intel_engine_cs *engine)
{
	/* Default vfuncs which can be overridden by each engine. */

	engine->destroy = execlists_destroy;
	engine->resume = execlists_resume;

	engine->reset.prepare = execlists_reset_prepare;
	engine->reset.reset = execlists_reset;
	engine->reset.finish = execlists_reset_finish;

	engine->cops = &execlists_context_ops;
	engine->request_alloc = execlists_request_alloc;

	engine->emit_flush = gen8_emit_flush;
	engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
	engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb;

	engine->set_default_submission = intel_execlists_set_default_submission;

	if (INTEL_GEN(engine->i915) < 11) {
		engine->irq_enable = gen8_logical_ring_enable_irq;
		engine->irq_disable = gen8_logical_ring_disable_irq;
	} else {
		/*
		 * TODO: On Gen11 interrupt masks need to be clear
		 * to allow C6 entry. Keep interrupts enabled at all times
		 * and take the hit of generating extra interrupts
		 * until a more refined solution exists.
		 */
	}
	if (IS_GEN(engine->i915, 8))
		engine->emit_bb_start = gen8_emit_bb_start;
	else
		engine->emit_bb_start = gen9_emit_bb_start;
}

static inline void
logical_ring_default_irqs(struct intel_engine_cs *engine)
{
	unsigned int shift = 0;

	if (INTEL_GEN(engine->i915) < 11) {
		const u8 irq_shifts[] = {
			[RCS0]  = GEN8_RCS_IRQ_SHIFT,
			[BCS0]  = GEN8_BCS_IRQ_SHIFT,
			[VCS0]  = GEN8_VCS0_IRQ_SHIFT,
			[VCS1]  = GEN8_VCS1_IRQ_SHIFT,
			[VECS0] = GEN8_VECS_IRQ_SHIFT,
2808 2809 2810 2811 2812
		};

		shift = irq_shifts[engine->id];
	}

	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
	engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
}

int intel_execlists_submission_setup(struct intel_engine_cs *engine)
{
	/* Intentionally left blank. */
	engine->buffer = NULL;

	tasklet_init(&engine->execlists.tasklet,
		     execlists_submission_tasklet, (unsigned long)engine);
	timer_setup(&engine->execlists.timer, execlists_submission_timer, 0);

	logical_ring_default_vfuncs(engine);
	logical_ring_default_irqs(engine);

	if (engine->class == RENDER_CLASS) {
		engine->init_context = gen8_init_rcs_context;
		engine->emit_flush = gen8_emit_flush_render;
		engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
	}

	return 0;
}

int intel_execlists_submission_init(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct drm_i915_private *i915 = engine->i915;
	struct intel_uncore *uncore = engine->uncore;
	u32 base = engine->mmio_base;
	int ret;

	ret = intel_engine_init_common(engine);
	if (ret)
		return ret;

	if (intel_init_workaround_bb(engine))
		/*
		 * We continue even if we fail to initialize WA batch
		 * because we only expect rare glitches but nothing
		 * critical to prevent us from using GPU
		 */
		DRM_ERROR("WA batch buffer initialization failed\n");

	if (HAS_LOGICAL_RING_ELSQ(i915)) {
		execlists->submit_reg = uncore->regs +
			i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base));
		execlists->ctrl_reg = uncore->regs +
			i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base));
	} else {
		execlists->submit_reg = uncore->regs +
			i915_mmio_reg_offset(RING_ELSP(base));
	}

	execlists->csb_status =
		&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];

	execlists->csb_write =
		&engine->status_page.addr[intel_hws_csb_write_index(i915)];

	if (INTEL_GEN(i915) < 11)
		execlists->csb_size = GEN8_CSB_ENTRIES;
	else
		execlists->csb_size = GEN11_CSB_ENTRIES;

	reset_csb_pointers(engine);

	return 0;
}

static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
{
	u32 indirect_ctx_offset;

	switch (INTEL_GEN(engine->i915)) {
	default:
		MISSING_CASE(INTEL_GEN(engine->i915));
		/* fall through */
	case 11:
		indirect_ctx_offset =
			GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
		break;
	case 10:
		indirect_ctx_offset =
			GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
		break;
	case 9:
		indirect_ctx_offset =
			GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
		break;
	case 8:
		indirect_ctx_offset =
			GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
		break;
	}

	return indirect_ctx_offset;
}

static void execlists_init_reg_state(u32 *regs,
				     struct intel_context *ce,
				     struct intel_engine_cs *engine,
				     struct intel_ring *ring)
{
	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->gem_context->vm);
	bool rcs = engine->class == RENDER_CLASS;
	u32 base = engine->mmio_base;

	/*
	 * A context is actually a big batch buffer with several
	 * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
	 * values we are setting here are only for the first context restore:
	 * on a subsequent save, the GPU will recreate this batchbuffer with new
	 * values (including all the missing MI_LOAD_REGISTER_IMM commands that
	 * we are not initializing here).
	 *
	 * Must keep consistent with virtual_update_register_offsets().
	 */
	regs[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(rcs ? 14 : 11) |
				 MI_LRI_FORCE_POSTED;

	CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(base),
		_MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) |
		_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH));
	if (INTEL_GEN(engine->i915) < 11) {
		regs[CTX_CONTEXT_CONTROL + 1] |=
			_MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
					    CTX_CTRL_RS_CTX_ENABLE);
	}
	CTX_REG(regs, CTX_RING_HEAD, RING_HEAD(base), 0);
	CTX_REG(regs, CTX_RING_TAIL, RING_TAIL(base), 0);
	CTX_REG(regs, CTX_RING_BUFFER_START, RING_START(base), 0);
	CTX_REG(regs, CTX_RING_BUFFER_CONTROL, RING_CTL(base),
		RING_CTL_SIZE(ring->size) | RING_VALID);
	CTX_REG(regs, CTX_BB_HEAD_U, RING_BBADDR_UDW(base), 0);
	CTX_REG(regs, CTX_BB_HEAD_L, RING_BBADDR(base), 0);
	CTX_REG(regs, CTX_BB_STATE, RING_BBSTATE(base), RING_BB_PPGTT);
	CTX_REG(regs, CTX_SECOND_BB_HEAD_U, RING_SBBADDR_UDW(base), 0);
	CTX_REG(regs, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(base), 0);
	CTX_REG(regs, CTX_SECOND_BB_STATE, RING_SBBSTATE(base), 0);
	if (rcs) {
		struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;

		CTX_REG(regs, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(base), 0);
		CTX_REG(regs, CTX_RCS_INDIRECT_CTX_OFFSET,
			RING_INDIRECT_CTX_OFFSET(base), 0);
		if (wa_ctx->indirect_ctx.size) {
			u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);

			regs[CTX_RCS_INDIRECT_CTX + 1] =
				(ggtt_offset + wa_ctx->indirect_ctx.offset) |
				(wa_ctx->indirect_ctx.size / CACHELINE_BYTES);

			regs[CTX_RCS_INDIRECT_CTX_OFFSET + 1] =
				intel_lr_indirect_ctx_offset(engine) << 6;
		}

		CTX_REG(regs, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(base), 0);
		if (wa_ctx->per_ctx.size) {
			u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);

			regs[CTX_BB_PER_CTX_PTR + 1] =
				(ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
		}
	}

	regs[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;

	CTX_REG(regs, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(base), 0);
	/* PDP values will be assigned later if needed */
	CTX_REG(regs, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(base, 3), 0);
	CTX_REG(regs, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(base, 3), 0);
	CTX_REG(regs, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(base, 2), 0);
	CTX_REG(regs, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(base, 2), 0);
	CTX_REG(regs, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(base, 1), 0);
	CTX_REG(regs, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(base, 1), 0);
	CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(base, 0), 0);
	CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(base, 0), 0);

	if (i915_vm_is_4lvl(&ppgtt->vm)) {
		/* 64b PPGTT (48bit canonical)
		 * PDP0_DESCRIPTOR contains the base address to PML4 and
		 * other PDP Descriptors are ignored.
		 */
		ASSIGN_CTX_PML4(ppgtt, regs);
	} else {
		ASSIGN_CTX_PDP(ppgtt, regs, 3);
		ASSIGN_CTX_PDP(ppgtt, regs, 2);
		ASSIGN_CTX_PDP(ppgtt, regs, 1);
		ASSIGN_CTX_PDP(ppgtt, regs, 0);
	}

	if (rcs) {
		regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
		CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 0);
	}

	regs[CTX_END] = MI_BATCH_BUFFER_END;
	if (INTEL_GEN(engine->i915) >= 10)
		regs[CTX_END] |= BIT(0);
}

static int
populate_lr_context(struct intel_context *ce,
		    struct drm_i915_gem_object *ctx_obj,
		    struct intel_engine_cs *engine,
		    struct intel_ring *ring)
{
	void *vaddr;
	u32 *regs;
	int ret;

	vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
		return ret;
	}

	if (engine->default_state) {
		/*
		 * We only want to copy over the template context state;
		 * skipping over the headers reserved for GuC communication,
		 * leaving those as zero.
		 */
		const unsigned long start = LRC_HEADER_PAGES * PAGE_SIZE;
		void *defaults;

		defaults = i915_gem_object_pin_map(engine->default_state,
						   I915_MAP_WB);
		if (IS_ERR(defaults)) {
			ret = PTR_ERR(defaults);
			goto err_unpin_ctx;
		}

		memcpy(vaddr + start, defaults + start, engine->context_size);
		i915_gem_object_unpin_map(engine->default_state);
	}

	/* The second page of the context object contains some fields which must
	 * be set up prior to the first execution. */
	regs = vaddr + LRC_STATE_PN * PAGE_SIZE;
	execlists_init_reg_state(regs, ce, engine, ring);
	if (!engine->default_state)
		regs[CTX_CONTEXT_CONTROL + 1] |=
			_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);

	ret = 0;
err_unpin_ctx:
	__i915_gem_object_flush_map(ctx_obj,
				    LRC_HEADER_PAGES * PAGE_SIZE,
				    engine->context_size);
	i915_gem_object_unpin_map(ctx_obj);
	return ret;
}

static struct intel_timeline *
get_timeline(struct i915_gem_context *ctx, struct intel_gt *gt)
{
	if (ctx->timeline)
		return intel_timeline_get(ctx->timeline);
	else
		return intel_timeline_create(gt, NULL);
}

static int execlists_context_deferred_alloc(struct intel_context *ce,
					    struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *ctx_obj;
	struct i915_vma *vma;
	u32 context_size;
	struct intel_ring *ring;
	struct intel_timeline *timeline;
	int ret;

	if (ce->state)
		return 0;

	context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);

	/*
	 * Before the actual start of the context image, we insert a few pages
	 * for our own use and for sharing with the GuC.
	 */
	context_size += LRC_HEADER_PAGES * PAGE_SIZE;

	ctx_obj = i915_gem_object_create_shmem(engine->i915, context_size);
	if (IS_ERR(ctx_obj))
		return PTR_ERR(ctx_obj);

	vma = i915_vma_instance(ctx_obj, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto error_deref_obj;
	}

	timeline = get_timeline(ce->gem_context, engine->gt);
	if (IS_ERR(timeline)) {
		ret = PTR_ERR(timeline);
		goto error_deref_obj;
	}

	ring = intel_engine_create_ring(engine,
					timeline,
					ce->gem_context->ring_size);
	intel_timeline_put(timeline);
	if (IS_ERR(ring)) {
		ret = PTR_ERR(ring);
		goto error_deref_obj;
	}

	ret = populate_lr_context(ce, ctx_obj, engine, ring);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
		goto error_ring_free;
	}

	ce->ring = ring;
	ce->state = vma;

	return 0;

error_ring_free:
	intel_ring_put(ring);
error_deref_obj:
	i915_gem_object_put(ctx_obj);
	return ret;
}

static struct list_head *virtual_queue(struct virtual_engine *ve)
{
	return &ve->base.execlists.default_priolist.requests[0];
}

static void virtual_context_destroy(struct kref *kref)
{
	struct virtual_engine *ve =
		container_of(kref, typeof(*ve), context.ref);
	unsigned int n;

	GEM_BUG_ON(!list_empty(virtual_queue(ve)));
	GEM_BUG_ON(ve->request);
	GEM_BUG_ON(ve->context.inflight);

	for (n = 0; n < ve->num_siblings; n++) {
		struct intel_engine_cs *sibling = ve->siblings[n];
		struct rb_node *node = &ve->nodes[sibling->id].rb;

		if (RB_EMPTY_NODE(node))
			continue;

		spin_lock_irq(&sibling->active.lock);

		/* Detachment is lazily performed in the execlists tasklet */
		if (!RB_EMPTY_NODE(node))
			rb_erase_cached(node, &sibling->execlists.virtual);

		spin_unlock_irq(&sibling->active.lock);
	}
	GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));

	if (ve->context.state)
		__execlists_context_fini(&ve->context);

	kfree(ve->bonds);
	kfree(ve);
}

static void virtual_engine_initial_hint(struct virtual_engine *ve)
{
	int swp;

	/*
	 * Pick a random sibling on starting to help spread the load around.
	 *
	 * New contexts are typically created with exactly the same order
	 * of siblings, and often started in batches. Due to the way we iterate
	 * the array of sibling when submitting requests, sibling[0] is
	 * prioritised for dequeuing. If we make sure that sibling[0] is fairly
	 * randomised across the system, we also help spread the load by the
	 * first engine we inspect being different each time.
	 *
	 * NB This does not force us to execute on this engine, it will just
	 * typically be the first we inspect for submission.
	 */
	swp = prandom_u32_max(ve->num_siblings);
	if (!swp)
		return;

	swap(ve->siblings[swp], ve->siblings[0]);
	virtual_update_register_offsets(ve->context.lrc_reg_state,
					ve->siblings[0]);
}

static int virtual_context_pin(struct intel_context *ce)
{
	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
	int err;

	/* Note: we must use a real engine class for setting up reg state */
	err = __execlists_context_pin(ce, ve->siblings[0]);
	if (err)
		return err;

	virtual_engine_initial_hint(ve);
	return 0;
}

static void virtual_context_enter(struct intel_context *ce)
{
	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
	unsigned int n;

	for (n = 0; n < ve->num_siblings; n++)
		intel_engine_pm_get(ve->siblings[n]);
}

static void virtual_context_exit(struct intel_context *ce)
{
	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
	unsigned int n;

	for (n = 0; n < ve->num_siblings; n++)
		intel_engine_pm_put(ve->siblings[n]);
}

static const struct intel_context_ops virtual_context_ops = {
	.pin = virtual_context_pin,
	.unpin = execlists_context_unpin,

	.enter = virtual_context_enter,
	.exit = virtual_context_exit,

	.destroy = virtual_context_destroy,
};

static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
{
	struct i915_request *rq;
	intel_engine_mask_t mask;

	rq = READ_ONCE(ve->request);
	if (!rq)
		return 0;

	/* The rq is ready for submission; rq->execution_mask is now stable. */
	mask = rq->execution_mask;
	if (unlikely(!mask)) {
		/* Invalid selection; skip it and fall back to the first sibling */
		i915_request_skip(rq, -ENODEV);
		mask = ve->siblings[0]->mask;
	}

	GEM_TRACE("%s: rq=%llx:%lld, mask=%x, prio=%d\n",
		  ve->base.name,
		  rq->fence.context, rq->fence.seqno,
		  mask, ve->base.execlists.queue_priority_hint);

	return mask;
}

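/*
 * Offer the single pending virtual request to its siblings: for each
 * sibling permitted by the execution mask, (re)insert this virtual
 * engine's node into the sibling's priority-sorted rbtree of virtual
 * requests, kicking that sibling's execlists tasklet whenever we become
 * its highest-priority work; nodes on excluded siblings are removed.
 */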
static void virtual_submission_tasklet(unsigned long data)
{
	struct virtual_engine * const ve = (struct virtual_engine *)data;
	const int prio = ve->base.execlists.queue_priority_hint;
	intel_engine_mask_t mask;
	unsigned int n;

	rcu_read_lock();
	mask = virtual_submission_mask(ve);
	rcu_read_unlock();
	if (unlikely(!mask))
		return;

	local_irq_disable();
	for (n = 0; READ_ONCE(ve->request) && n < ve->num_siblings; n++) {
		struct intel_engine_cs *sibling = ve->siblings[n];
		struct ve_node * const node = &ve->nodes[sibling->id];
		struct rb_node **parent, *rb;
		bool first;

		if (unlikely(!(mask & sibling->mask))) {
			if (!RB_EMPTY_NODE(&node->rb)) {
				spin_lock(&sibling->active.lock);
				rb_erase_cached(&node->rb,
						&sibling->execlists.virtual);
				RB_CLEAR_NODE(&node->rb);
				spin_unlock(&sibling->active.lock);
			}
			continue;
		}

		spin_lock(&sibling->active.lock);

		if (!RB_EMPTY_NODE(&node->rb)) {
			/*
			 * Cheat and avoid rebalancing the tree if we can
			 * reuse this node in situ.
			 */
			first = rb_first_cached(&sibling->execlists.virtual) ==
				&node->rb;
			if (prio == node->prio || (prio > node->prio && first))
				goto submit_engine;

			rb_erase_cached(&node->rb, &sibling->execlists.virtual);
		}

		rb = NULL;
		first = true;
		parent = &sibling->execlists.virtual.rb_root.rb_node;
		while (*parent) {
			struct ve_node *other;

			rb = *parent;
			other = rb_entry(rb, typeof(*other), rb);
			if (prio > other->prio) {
				parent = &rb->rb_left;
			} else {
				parent = &rb->rb_right;
				first = false;
			}
		}

		rb_link_node(&node->rb, rb, parent);
		rb_insert_color_cached(&node->rb,
				       &sibling->execlists.virtual,
				       first);

submit_engine:
		GEM_BUG_ON(RB_EMPTY_NODE(&node->rb));
		node->prio = prio;
		if (first && prio > sibling->execlists.queue_priority_hint) {
			sibling->execlists.queue_priority_hint = prio;
			tasklet_hi_schedule(&sibling->execlists.tasklet);
		}

		spin_unlock(&sibling->active.lock);
	}
	local_irq_enable();
}

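/*
 * A virtual engine carries at most one ready request at a time: record it,
 * update the queue priority hint and let the submission tasklet decide
 * which sibling(s) to offer it to.
 */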
static void virtual_submit_request(struct i915_request *rq)
{
	struct virtual_engine *ve = to_virtual_engine(rq->engine);

	GEM_TRACE("%s: rq=%llx:%lld\n",
		  ve->base.name,
		  rq->fence.context,
		  rq->fence.seqno);

	GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);

	GEM_BUG_ON(ve->request);
	GEM_BUG_ON(!list_empty(virtual_queue(ve)));

	ve->base.execlists.queue_priority_hint = rq_prio(rq);
	WRITE_ONCE(ve->request, rq);

	list_move_tail(&rq->sched.link, virtual_queue(ve));

	tasklet_schedule(&ve->base.execlists.tasklet);
}

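/* Look up the bond entry, if any, recorded for the given master engine. */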
static struct ve_bond *
virtual_find_bond(struct virtual_engine *ve,
		  const struct intel_engine_cs *master)
{
	int i;

	for (i = 0; i < ve->num_bonds; i++) {
		if (ve->bonds[i].master == master)
			return &ve->bonds[i];
	}

	return NULL;
}

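/*
 * Invoked when the bonded master request signals: narrow the bonded
 * request's execution_mask to the siblings permitted by the bond. The
 * cmpxchg loop ensures a concurrent update of the mask is not lost.
 */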
static void
virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
{
	struct virtual_engine *ve = to_virtual_engine(rq->engine);
	struct ve_bond *bond;

	bond = virtual_find_bond(ve, to_request(signal)->engine);
	if (bond) {
		intel_engine_mask_t old, new, cmp;

		cmp = READ_ONCE(rq->execution_mask);
		do {
			old = cmp;
			new = cmp & bond->sibling_mask;
		} while ((cmp = cmpxchg(&rq->execution_mask, old, new)) != old);
	}
}

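/*
 * Create a virtual engine spanning the given physical siblings. A single
 * sibling degenerates to an ordinary context on that engine. Every sibling
 * must be driven by the execlists submission tasklet, belong to the same
 * engine class and appear in the list only once.
 */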
struct intel_context *
intel_execlists_create_virtual(struct i915_gem_context *ctx,
			       struct intel_engine_cs **siblings,
			       unsigned int count)
{
	struct virtual_engine *ve;
	unsigned int n;
	int err;

	if (count == 0)
		return ERR_PTR(-EINVAL);

	if (count == 1)
		return intel_context_create(ctx, siblings[0]);

	ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL);
	if (!ve)
		return ERR_PTR(-ENOMEM);

	ve->base.i915 = ctx->i915;
	ve->base.gt = siblings[0]->gt;
	ve->base.id = -1;
	ve->base.class = OTHER_CLASS;
	ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
	ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;

	/*
	 * The decision on whether to submit a request using semaphores
	 * depends on the saturated state of the engine. We only compute
	 * this during HW submission of the request, and we need this
	 * state to be globally applied to all requests being submitted
	 * to this engine. Virtual engines encompass more than one physical
	 * engine and so we cannot accurately tell in advance if one of those
	 * engines is already saturated and so cannot afford to use a semaphore
	 * and be pessimized in priority for doing so -- if we are the only
	 * context using semaphores after all other clients have stopped, we
	 * will be starved on the saturated system. Such a global switch for
	 * semaphores is less than ideal, but alas is the current compromise.
	 */
	ve->base.saturated = ALL_ENGINES;

	snprintf(ve->base.name, sizeof(ve->base.name), "virtual");

	intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);

	intel_engine_init_execlists(&ve->base);

	ve->base.cops = &virtual_context_ops;
	ve->base.request_alloc = execlists_request_alloc;

	ve->base.schedule = i915_schedule;
	ve->base.submit_request = virtual_submit_request;
	ve->base.bond_execute = virtual_bond_execute;

	INIT_LIST_HEAD(virtual_queue(ve));
	ve->base.execlists.queue_priority_hint = INT_MIN;
	tasklet_init(&ve->base.execlists.tasklet,
		     virtual_submission_tasklet,
		     (unsigned long)ve);

	intel_context_init(&ve->context, ctx, &ve->base);

	for (n = 0; n < count; n++) {
		struct intel_engine_cs *sibling = siblings[n];

		GEM_BUG_ON(!is_power_of_2(sibling->mask));
		if (sibling->mask & ve->base.mask) {
			DRM_DEBUG("duplicate %s entry in load balancer\n",
				  sibling->name);
			err = -EINVAL;
			goto err_put;
		}

		/*
		 * The virtual engine implementation is tightly coupled to
		 * the execlists backend -- we push requests directly
		 * into a tree inside each physical engine. We could support
		 * layering if we handle cloning of the requests and
		 * submitting a copy into each backend.
		 */
		if (sibling->execlists.tasklet.func !=
		    execlists_submission_tasklet) {
			err = -ENODEV;
			goto err_put;
		}

		GEM_BUG_ON(RB_EMPTY_NODE(&ve->nodes[sibling->id].rb));
		RB_CLEAR_NODE(&ve->nodes[sibling->id].rb);

		ve->siblings[ve->num_siblings++] = sibling;
		ve->base.mask |= sibling->mask;

		/*
		 * All physical engines must be compatible for their emission
		 * functions (as we build the instructions during request
		 * construction and do not alter them before submission
		 * on the physical engine). We use the engine class as a guide
		 * here, although that could be refined.
		 */
		if (ve->base.class != OTHER_CLASS) {
			if (ve->base.class != sibling->class) {
				DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n",
					  sibling->class, ve->base.class);
				err = -EINVAL;
				goto err_put;
			}
			continue;
		}

		ve->base.class = sibling->class;
		ve->base.uabi_class = sibling->uabi_class;
		snprintf(ve->base.name, sizeof(ve->base.name),
			 "v%dx%d", ve->base.class, count);
		ve->base.context_size = sibling->context_size;

		ve->base.emit_bb_start = sibling->emit_bb_start;
		ve->base.emit_flush = sibling->emit_flush;
		ve->base.emit_init_breadcrumb = sibling->emit_init_breadcrumb;
		ve->base.emit_fini_breadcrumb = sibling->emit_fini_breadcrumb;
		ve->base.emit_fini_breadcrumb_dw =
			sibling->emit_fini_breadcrumb_dw;

		ve->base.flags = sibling->flags;
	}

	ve->base.flags |= I915_ENGINE_IS_VIRTUAL;

	return &ve->context;

err_put:
	intel_context_put(&ve->context);
	return ERR_PTR(err);
}

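/*
 * Duplicate an existing virtual engine for a new context, copying both its
 * set of siblings and any bonds attached to the original.
 */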
struct intel_context *
intel_execlists_clone_virtual(struct i915_gem_context *ctx,
			      struct intel_engine_cs *src)
{
	struct virtual_engine *se = to_virtual_engine(src);
	struct intel_context *dst;

	dst = intel_execlists_create_virtual(ctx,
					     se->siblings,
					     se->num_siblings);
	if (IS_ERR(dst))
		return dst;

	if (se->num_bonds) {
		struct virtual_engine *de = to_virtual_engine(dst->engine);

		de->bonds = kmemdup(se->bonds,
				    sizeof(*se->bonds) * se->num_bonds,
				    GFP_KERNEL);
		if (!de->bonds) {
			intel_context_put(dst);
			return ERR_PTR(-ENOMEM);
		}

		de->num_bonds = se->num_bonds;
	}

	return dst;
}

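/*
 * Record that, once a request on @master signals, execution of the bonded
 * request on this virtual engine is restricted to @sibling. Repeated calls
 * for the same master accumulate siblings into a single bond mask.
 */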
int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
				     const struct intel_engine_cs *master,
				     const struct intel_engine_cs *sibling)
{
	struct virtual_engine *ve = to_virtual_engine(engine);
	struct ve_bond *bond;
	int n;

	/* Sanity check the sibling is part of the virtual engine */
	for (n = 0; n < ve->num_siblings; n++)
		if (sibling == ve->siblings[n])
			break;
	if (n == ve->num_siblings)
		return -EINVAL;

	bond = virtual_find_bond(ve, master);
	if (bond) {
		bond->sibling_mask |= sibling->mask;
		return 0;
	}

	bond = krealloc(ve->bonds,
			sizeof(*bond) * (ve->num_bonds + 1),
			GFP_KERNEL);
	if (!bond)
		return -ENOMEM;

	bond[ve->num_bonds].master = master;
	bond[ve->num_bonds].sibling_mask = sibling->mask;

	ve->bonds = bond;
	ve->num_bonds++;

	return 0;
}

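/*
 * Debug helper: print up to @max of the engine's executing ('E'), queued
 * ('Q') and pending virtual ('V') requests via @show_request, all sampled
 * under the engine's active.lock.
 */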
void intel_execlists_show_requests(struct intel_engine_cs *engine,
				   struct drm_printer *m,
				   void (*show_request)(struct drm_printer *m,
							struct i915_request *rq,
							const char *prefix),
				   unsigned int max)
{
	const struct intel_engine_execlists *execlists = &engine->execlists;
	struct i915_request *rq, *last;
	unsigned long flags;
	unsigned int count;
	struct rb_node *rb;

	spin_lock_irqsave(&engine->active.lock, flags);

	last = NULL;
	count = 0;
	list_for_each_entry(rq, &engine->active.requests, sched.link) {
		if (count++ < max - 1)
			show_request(m, rq, "\t\tE ");
		else
			last = rq;
	}
	if (last) {
		if (count > max) {
			drm_printf(m,
				   "\t\t...skipping %d executing requests...\n",
				   count - max);
		}
		show_request(m, last, "\t\tE ");
	}

	last = NULL;
	count = 0;
	if (execlists->queue_priority_hint != INT_MIN)
		drm_printf(m, "\t\tQueue priority hint: %d\n",
			   execlists->queue_priority_hint);
	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
		struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
		int i;

		priolist_for_each_request(rq, p, i) {
			if (count++ < max - 1)
				show_request(m, rq, "\t\tQ ");
			else
				last = rq;
		}
	}
	if (last) {
		if (count > max) {
			drm_printf(m,
				   "\t\t...skipping %d queued requests...\n",
				   count - max);
		}
		show_request(m, last, "\t\tQ ");
	}

	last = NULL;
	count = 0;
	for (rb = rb_first_cached(&execlists->virtual); rb; rb = rb_next(rb)) {
		struct virtual_engine *ve =
			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
		struct i915_request *rq = READ_ONCE(ve->request);

		if (rq) {
			if (count++ < max - 1)
				show_request(m, rq, "\t\tV ");
			else
				last = rq;
		}
	}
	if (last) {
		if (count > max) {
			drm_printf(m,
				   "\t\t...skipping %d virtual requests...\n",
				   count - max);
		}
		show_request(m, last, "\t\tV ");
	}

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

void intel_lr_context_reset(struct intel_engine_cs *engine,
			    struct intel_context *ce,
			    u32 head,
			    bool scrub)
{
	/*
	 * We want a simple context + ring to execute the breadcrumb update.
	 * We cannot rely on the context being intact across the GPU hang,
	 * so clear it and rebuild just what we need for the breadcrumb.
	 * All pending requests for this context will be zapped, and any
	 * future request will be after userspace has had the opportunity
	 * to recreate its own state.
	 */
	if (scrub) {
		u32 *regs = ce->lrc_reg_state;

		if (engine->pinned_default_state) {
			memcpy(regs, /* skip restoring the vanilla PPHWSP */
			       engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
			       engine->context_size - PAGE_SIZE);
		}
		execlists_init_reg_state(regs, ce, engine, ce->ring);
	}

	/* Rerun the request; its payload has been neutered (if guilty). */
	ce->ring->head = head;
	intel_ring_update_space(ce->ring);

	__execlists_update_reg_state(ce, engine);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_lrc.c"
#endif