/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Michel Thierry <michel.thierry@intel.com>
 *    Thomas Daniel <thomas.daniel@intel.com>
 *    Oscar Mateo <oscar.mateo@intel.com>
 *
 */

/**
 * DOC: Logical Rings, Logical Ring Contexts and Execlists
 *
 * Motivation:
 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
 * These expanded contexts enable a number of new abilities, especially
 * "Execlists" (also implemented in this file).
 *
 * One of the main differences with the legacy HW contexts is that logical
 * ring contexts incorporate many more things into the context's state, like
 * PDPs or ringbuffer control registers:
 *
 * The reason why PDPs are included in the context is straightforward: as
 * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
 * contained there means you don't need to do a ppgtt->switch_mm yourself,
 * instead, the GPU will do it for you on the context switch.
 *
 * But, what about the ringbuffer control registers (head, tail, etc..)?
 * shouldn't we just need a set of those per engine command streamer? This is
 * where the name "Logical Rings" starts to make sense: by virtualizing the
 * rings, the engine cs shifts to a new "ring buffer" with every context
 * switch. When you want to submit a workload to the GPU you: A) choose your
 * context, B) find its appropriate virtualized ring, C) write commands to it
 * and then, finally, D) tell the GPU to switch to that context.
 *
 * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
 * to a context is via a context execution list, ergo "Execlists".
 *
 * LRC implementation:
 * Regarding the creation of contexts, we have:
 *
 * - One global default context.
 * - One local default context for each opened fd.
 * - One local extra context for each context create ioctl call.
 *
 * Now that ringbuffers belong per-context (and not per-engine, like before)
 * and that contexts are uniquely tied to a given engine (and not reusable,
 * like before) we need:
 *
 * - One ringbuffer per-engine inside each context.
 * - One backing object per-engine inside each context.
 *
 * The global default context starts its life with these new objects fully
 * allocated and populated. The local default context for each opened fd is
 * more complex, because we don't know at creation time which engine is going
 * to use them. To handle this, we have implemented a deferred creation of LR
 * contexts:
 *
 * The local context starts its life as a hollow or blank holder, that only
 * gets populated for a given engine once we receive an execbuffer. If later
 * on we receive another execbuffer ioctl for the same context but a different
 * engine, we allocate/populate a new ringbuffer and context backing object and
 * so on.
 *
 * Finally, regarding local contexts created using the ioctl call: as they are
 * only allowed with the render ring, we can allocate & populate them right
 * away (no need to defer anything, at least for now).
 *
 * Execlists implementation:
 * Execlists are the new method by which, on gen8+ hardware, workloads are
 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
 * This method works as follows:
 *
 * When a request is committed, its commands (the BB start and any leading or
 * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
 * for the appropriate context. The tail pointer in the hardware context is not
 * updated at this time, but instead, kept by the driver in the ringbuffer
 * structure. A structure representing this request is added to a request queue
 * for the appropriate engine: this structure contains a copy of the context's
 * tail after the request was written to the ring buffer and a pointer to the
 * context itself.
 *
 * If the engine's request queue was empty before the request was added, the
 * queue is processed immediately. Otherwise the queue will be processed during
 * a context switch interrupt. In any case, elements on the queue will get sent
 * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
 * globally unique 20-bit submission ID.
 *
 * When execution of a request completes, the GPU updates the context status
 * buffer with a context complete event and generates a context switch interrupt.
 * During the interrupt handling, the driver examines the events in the buffer:
 * for each context complete event, if the announced ID matches that on the head
 * of the request queue, then that request is retired and removed from the queue.
 *
 * After processing, if any requests were retired and the queue is not empty
 * then a new execution list can be submitted. The two requests at the front of
 * the queue are next to be submitted but since a context may not occur twice in
 * an execution list, if subsequent requests have the same ID as the first then
 * the two requests must be combined. This is done simply by discarding requests
 * at the head of the queue until either only one request is left (in which case
 * we use a NULL second context) or the first two requests have unique IDs.
 *
 * By always executing the first two requests in the queue the driver ensures
 * that the GPU is kept as busy as possible. In the case where a single context
 * completes but a second context is still executing, the request for this second
 * context will be at the head of the queue when we remove the first one. This
 * request will then be resubmitted along with a new request for a different context,
 * which will cause the hardware to continue executing the second request and queue
 * the new request (the GPU detects the condition of a context getting preempted
 * with the same context and optimizes the context switch flow by not doing
 * preemption, but just sampling the new tail pointer).
 *
 */
#include <linux/interrupt.h>

#include "i915_drv.h"
#include "i915_gem_render_state.h"
#include "i915_vgpu.h"
#include "intel_engine_pm.h"
#include "intel_lrc_reg.h"
#include "intel_mocs.h"
#include "intel_reset.h"
#include "intel_workarounds.h"

#define RING_EXECLIST_QFULL		(1 << 0x2)
#define RING_EXECLIST1_VALID		(1 << 0x3)
#define RING_EXECLIST0_VALID		(1 << 0x4)
#define RING_EXECLIST_ACTIVE_STATUS	(3 << 0xE)
#define RING_EXECLIST1_ACTIVE		(1 << 0x11)
#define RING_EXECLIST0_ACTIVE		(1 << 0x12)

#define GEN8_CTX_STATUS_IDLE_ACTIVE	(1 << 0)
#define GEN8_CTX_STATUS_PREEMPTED	(1 << 1)
#define GEN8_CTX_STATUS_ELEMENT_SWITCH	(1 << 2)
#define GEN8_CTX_STATUS_ACTIVE_IDLE	(1 << 3)
#define GEN8_CTX_STATUS_COMPLETE	(1 << 4)
#define GEN8_CTX_STATUS_LITE_RESTORE	(1 << 15)

#define GEN8_CTX_STATUS_COMPLETED_MASK \
	 (GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED)

/* Typical size of the average request (2 pipecontrols and a MI_BB) */
#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
#define WA_TAIL_DWORDS 2
#define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)

static int execlists_context_deferred_alloc(struct intel_context *ce,
					    struct intel_engine_cs *engine);
static void execlists_init_reg_state(u32 *reg_state,
				     struct intel_context *ce,
				     struct intel_engine_cs *engine,
				     struct intel_ring *ring);

static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
	return rb_entry(rb, struct i915_priolist, node);
}

static inline int rq_prio(const struct i915_request *rq)
{
	return rq->sched.attr.priority;
}

static int effective_prio(const struct i915_request *rq)
{
	int prio = rq_prio(rq);

	/*
	 * On unwinding the active request, we give it a priority bump
	 * if it has completed waiting on any semaphore. If we know that
	 * the request has already started, we can prevent an unwanted
	 * preempt-to-idle cycle by taking that into account now.
	 */
	if (__i915_request_has_started(rq))
		prio |= I915_PRIORITY_NOSEMAPHORE;

	/* Restrict mere WAIT boosts from triggering preemption */
	return prio | __NO_PREEMPTION;
}

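/* Peek at the priority of the highest priority request waiting in the queue */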
static int queue_prio(const struct intel_engine_execlists *execlists)
{
	struct i915_priolist *p;
	struct rb_node *rb;

	rb = rb_first_cached(&execlists->queue);
	if (!rb)
		return INT_MIN;

	/*
	 * As the priolist[] are inverted, with the highest priority in [0],
	 * we have to flip the index value to become priority.
	 */
	p = to_priolist(rb);
	return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used);
}

static inline bool need_preempt(const struct intel_engine_cs *engine,
				const struct i915_request *rq)
{
	int last_prio;

	if (!engine->preempt_context)
		return false;

	if (i915_request_completed(rq))
		return false;

	/*
	 * Check if the current priority hint merits a preemption attempt.
	 *
	 * We record the highest value priority we saw during rescheduling
	 * prior to this dequeue, therefore we know that if it is strictly
	 * less than the current tail of ELSP[0], we do not need to force
	 * a preempt-to-idle cycle.
	 *
	 * However, the priority hint is a mere hint that we may need to
	 * preempt. If that hint is stale or we may be trying to preempt
	 * ourselves, ignore the request.
	 */
	last_prio = effective_prio(rq);
	if (!i915_scheduler_need_preempt(engine->execlists.queue_priority_hint,
					 last_prio))
		return false;

	/*
	 * Check against the first request in ELSP[1], it will, thanks to the
	 * power of PI, be the highest priority of that context.
	 */
	if (!list_is_last(&rq->link, &engine->timeline.requests) &&
	    rq_prio(list_next_entry(rq, link)) > last_prio)
		return true;

	/*
	 * If the inflight context did not trigger the preemption, then maybe
	 * it was the set of queued requests? Pick the highest priority in
	 * the queue (the first active priolist) and see if it deserves to be
	 * running instead of ELSP[0].
	 *
	 * The highest priority request in the queue cannot be either
	 * ELSP[0] or ELSP[1] as, thanks again to PI, if it was the same
	 * context, its priority would not exceed ELSP[0] aka last_prio.
	 */
	return queue_prio(&engine->execlists) > last_prio;
}

__maybe_unused static inline bool
assert_priority_queue(const struct i915_request *prev,
		      const struct i915_request *next)
{
	const struct intel_engine_execlists *execlists =
		&prev->engine->execlists;

	/*
	 * Without preemption, the prev may refer to the still active element
	 * which we refuse to let go.
	 *
	 * Even with preemption, there are times when we think it is better not
	 * to preempt and leave an ostensibly lower priority request in flight.
	 */
	if (port_request(execlists->port) == prev)
		return true;

	return rq_prio(prev) >= rq_prio(next);
}

/*
 * The context descriptor encodes various attributes of a context,
 * including its GTT address and some flags. Because it's fairly
 * expensive to calculate, we'll just do it once and cache the result,
 * which remains valid until the context is unpinned.
 *
 * This is what a descriptor looks like, from LSB to MSB::
 *
 *      bits  0-11:    flags, GEN8_CTX_* (cached in ctx->desc_template)
 *      bits 12-31:    LRCA, GTT address of (the HWSP of) this context
 *      bits 32-52:    ctx ID, a globally unique tag (highest bit used by GuC)
 *      bits 53-54:    mbz, reserved for use by hardware
 *      bits 55-63:    group ID, currently unused and set to 0
 *
 * Starting from Gen11, the upper dword of the descriptor has a new format:
 *
 *      bits 32-36:    reserved
 *      bits 37-47:    SW context ID
 *      bits 48-53:    engine instance
 *      bit 54:        mbz, reserved for use by hardware
 *      bits 55-60:    SW counter
 *      bits 61-63:    engine class
 *
 * engine info, SW context ID and SW counter need to form a unique number
 * (Context ID) per lrc.
 */
static u64
lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
{
	struct i915_gem_context *ctx = ce->gem_context;
	u64 desc;

	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
	BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > (BIT(GEN11_SW_CTX_ID_WIDTH)));

	desc = ctx->desc_template;				/* bits  0-11 */
	GEM_BUG_ON(desc & GENMASK_ULL(63, 12));

	desc |= i915_ggtt_offset(ce->state) + LRC_HEADER_PAGES * PAGE_SIZE;
								/* bits 12-31 */
	GEM_BUG_ON(desc & GENMASK_ULL(63, 32));

	/*
	 * The following 32bits are copied into the OA reports (dword 2).
	 * Consider updating oa_get_render_ctx_id in i915_perf.c when changing
	 * anything below.
	 */
	if (INTEL_GEN(engine->i915) >= 11) {
		GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH));
		desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT;
								/* bits 37-47 */

		desc |= (u64)engine->instance << GEN11_ENGINE_INSTANCE_SHIFT;
								/* bits 48-53 */

		/* TODO: decide what to do with SW counter (bits 55-60) */

		desc |= (u64)engine->class << GEN11_ENGINE_CLASS_SHIFT;
								/* bits 61-63 */
	} else {
		GEM_BUG_ON(ctx->hw_id >= BIT(GEN8_CTX_ID_WIDTH));
		desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT;	/* bits 32-52 */
	}

	return desc;
}

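/*
 * rq->wa_tail points just past the WA_TAIL padding emitted after the
 * final breadcrumb (see gen8_emit_fini_breadcrumb()); on unsubmission,
 * wind rq->tail back to the end of the request proper.
 */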
static void unwind_wa_tail(struct i915_request *rq)
{
	rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES);
	assert_ring_tail_valid(rq->ring, rq->tail);
}

static struct i915_request *
__unwind_incomplete_requests(struct intel_engine_cs *engine)
{
	struct i915_request *rq, *rn, *active = NULL;
	struct list_head *uninitialized_var(pl);
	int prio = I915_PRIORITY_INVALID;

	lockdep_assert_held(&engine->timeline.lock);

	list_for_each_entry_safe_reverse(rq, rn,
					 &engine->timeline.requests,
					 link) {
		if (i915_request_completed(rq))
			break;

		__i915_request_unsubmit(rq);
		unwind_wa_tail(rq);

		GEM_BUG_ON(rq->hw_context->active);

		GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
		if (rq_prio(rq) != prio) {
			prio = rq_prio(rq);
			pl = i915_sched_lookup_priolist(engine, prio);
		}
		GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));

		list_add(&rq->sched.link, pl);

		active = rq;
	}

	return active;
}

struct i915_request *
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
{
	struct intel_engine_cs *engine =
		container_of(execlists, typeof(*engine), execlists);

	return __unwind_incomplete_requests(engine);
}

static inline void
execlists_context_status_change(struct i915_request *rq, unsigned long status)
{
	/*
	 * Only used when GVT-g is enabled now. When GVT-g is disabled,
	 * the compiler should eliminate this function as dead code.
	 */
	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
		return;

	atomic_notifier_call_chain(&rq->engine->context_status_notifier,
				   status, rq);
}

inline void
execlists_user_begin(struct intel_engine_execlists *execlists,
		     const struct execlist_port *port)
{
	execlists_set_active_once(execlists, EXECLISTS_ACTIVE_USER);
}

inline void
execlists_user_end(struct intel_engine_execlists *execlists)
{
	execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
}

static inline void
execlists_context_schedule_in(struct i915_request *rq)
{
	GEM_BUG_ON(rq->hw_context->active);

	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
	intel_engine_context_in(rq->engine);
	rq->hw_context->active = rq->engine;
}

static inline void
execlists_context_schedule_out(struct i915_request *rq, unsigned long status)
{
	rq->hw_context->active = NULL;
	intel_engine_context_out(rq->engine);
	execlists_context_status_change(rq, status);
	trace_i915_request_out(rq);
}

static u64 execlists_update_context(struct i915_request *rq)
{
	struct intel_context *ce = rq->hw_context;

	ce->lrc_reg_state[CTX_RING_TAIL + 1] =
		intel_ring_set_tail(rq->ring, rq->tail);

	/*
	 * Make sure the context image is complete before we submit it to HW.
	 *
	 * Ostensibly, writes (including the WCB) should be flushed prior to
	 * an uncached write such as our mmio register access, the empirical
	 * evidence (esp. on Braswell) suggests that the WC write into memory
	 * may not be visible to the HW prior to the completion of the UC
	 * register write and that we may begin execution from the context
	 * before its image is complete leading to invalid PD chasing.
	 *
	 * Furthermore, Braswell, at least, wants a full mb to be sure that
	 * the writes are coherent in memory (visible to the GPU) prior to
	 * execution, and not just visible to other CPUs (as is the result of
	 * wmb).
	 */
	mb();
	return ce->lrc_desc;
}

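/*
 * Write one context descriptor to the hardware: into the ELSQ submit
 * queue slot for this port when a control register is present, or
 * directly to the ELSP register (upper dword first) otherwise.
 */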
static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port)
{
	if (execlists->ctrl_reg) {
		writel(lower_32_bits(desc), execlists->submit_reg + port * 2);
		writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1);
	} else {
		writel(upper_32_bits(desc), execlists->submit_reg);
		writel(lower_32_bits(desc), execlists->submit_reg);
	}
}

static void execlists_submit_ports(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists *execlists = &engine->execlists;
	struct execlist_port *port = execlists->port;
	unsigned int n;

	/*
	 * We can skip acquiring intel_runtime_pm_get() here as it was taken
	 * on our behalf by the request (see i915_gem_mark_busy()) and it will
	 * not be relinquished until the device is idle (see
	 * i915_gem_idle_work_handler()). As a precaution, we make sure
	 * that all ELSP are drained i.e. we have processed the CSB,
	 * before allowing ourselves to idle and calling intel_runtime_pm_put().
	 */
	GEM_BUG_ON(!intel_wakeref_active(&engine->wakeref));

	/*
	 * ELSQ note: the submit queue is not cleared after being submitted
	 * to the HW so we need to make sure we always clean it up. This is
	 * currently ensured by the fact that we always write the same number
	 * of elsq entries, keep this in mind before changing the loop below.
	 */
	for (n = execlists_num_ports(execlists); n--; ) {
		struct i915_request *rq;
		unsigned int count;
		u64 desc;

		rq = port_unpack(&port[n], &count);
		if (rq) {
			GEM_BUG_ON(count > !n);
			if (!count++)
				execlists_context_schedule_in(rq);
			port_set(&port[n], port_pack(rq, count));
			desc = execlists_update_context(rq);
			GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));

			GEM_TRACE("%s in[%d]:  ctx=%d.%d, fence %llx:%lld (current %d), prio=%d\n",
				  engine->name, n,
				  port[n].context_id, count,
				  rq->fence.context, rq->fence.seqno,
				  hwsp_seqno(rq),
				  rq_prio(rq));
		} else {
			GEM_BUG_ON(!n);
			desc = 0;
		}

		write_desc(execlists, desc, n);
	}

	/* we need to manually load the submit queue */
	if (execlists->ctrl_reg)
		writel(EL_CTRL_LOAD, execlists->ctrl_reg);

	execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
}

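/*
 * GVT-g contexts that force single submission must occupy ELSP[0] on
 * their own; they are never paired or merged with another context.
 */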
static bool ctx_single_port_submission(const struct intel_context *ce)
{
	return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
		i915_gem_context_force_single_submission(ce->gem_context));
}

static bool can_merge_ctx(const struct intel_context *prev,
			  const struct intel_context *next)
{
	if (prev != next)
		return false;

	if (ctx_single_port_submission(prev))
		return false;

	return true;
}

static bool can_merge_rq(const struct i915_request *prev,
			 const struct i915_request *next)
{
	GEM_BUG_ON(!assert_priority_queue(prev, next));

	if (!can_merge_ctx(prev->hw_context, next->hw_context))
		return false;

	return true;
}

static void port_assign(struct execlist_port *port, struct i915_request *rq)
{
	GEM_BUG_ON(rq == port_request(port));

	if (port_isset(port))
		i915_request_put(port_request(port));

	port_set(port, port_pack(i915_request_get(rq), port_count(port)));
}

static void inject_preempt_context(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists *execlists = &engine->execlists;
	struct intel_context *ce = engine->preempt_context;
	unsigned int n;

	GEM_BUG_ON(execlists->preempt_complete_status !=
		   upper_32_bits(ce->lrc_desc));

	/*
	 * Switch to our empty preempt context so
	 * the state of the GPU is known (idle).
	 */
	GEM_TRACE("%s\n", engine->name);
	for (n = execlists_num_ports(execlists); --n; )
		write_desc(execlists, 0, n);

	write_desc(execlists, ce->lrc_desc, n);

	/* we need to manually load the submit queue */
	if (execlists->ctrl_reg)
		writel(EL_CTRL_LOAD, execlists->ctrl_reg);

	execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
	execlists_set_active(execlists, EXECLISTS_ACTIVE_PREEMPT);

	(void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
}

static void complete_preempt_context(struct intel_engine_execlists *execlists)
{
	GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT));

	if (inject_preempt_hang(execlists))
		return;

	execlists_cancel_port_requests(execlists);
	__unwind_incomplete_requests(container_of(execlists,
						  struct intel_engine_cs,
						  execlists));
}

static void execlists_dequeue(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct execlist_port *port = execlists->port;
	const struct execlist_port * const last_port =
		&execlists->port[execlists->port_mask];
	struct i915_request *last = port_request(port);
	struct rb_node *rb;
	bool submit = false;

	/*
	 * Hardware submission is through 2 ports. Conceptually each port
	 * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
	 * static for a context, and unique to each, so we only execute
	 * requests belonging to a single context from each ring. RING_HEAD
	 * is maintained by the CS in the context image, it marks the place
	 * where it got up to last time, and through RING_TAIL we tell the CS
	 * where we want to execute up to this time.
	 *
	 * In this list the requests are in order of execution. Consecutive
	 * requests from the same context are adjacent in the ringbuffer. We
	 * can combine these requests into a single RING_TAIL update:
	 *
	 *              RING_HEAD...req1...req2
	 *                                    ^- RING_TAIL
	 * since to execute req2 the CS must first execute req1.
	 *
	 * Our goal then is to point each port to the end of a consecutive
	 * sequence of requests as being the most optimal (fewest wake ups
	 * and context switches) submission.
	 */

	if (last) {
		/*
		 * Don't resubmit or switch until all outstanding
		 * preemptions (lite-restore) are seen. Then we
		 * know the next preemption status we see corresponds
		 * to this ELSP update.
		 */
		GEM_BUG_ON(!execlists_is_active(execlists,
						EXECLISTS_ACTIVE_USER));
		GEM_BUG_ON(!port_count(&port[0]));

		/*
		 * If we write to ELSP a second time before the HW has had
		 * a chance to respond to the previous write, we can confuse
		 * the HW and hit "undefined behaviour". After writing to ELSP,
		 * we must then wait until we see a context-switch event from
		 * the HW to indicate that it has had a chance to respond.
		 */
		if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
			return;

		if (need_preempt(engine, last)) {
			inject_preempt_context(engine);
			return;
		}

		/*
		 * In theory, we could coalesce more requests onto
		 * the second port (the first port is active, with
		 * no preemptions pending). However, that means we
		 * then have to deal with the possible lite-restore
		 * of the second port (as we submit the ELSP, there
		 * may be a context-switch) but also we may complete
		 * the resubmission before the context-switch. Ergo,
		 * coalescing onto the second port will cause a
		 * preemption event, but we cannot predict whether
		 * that will affect port[0] or port[1].
		 *
		 * If the second port is already active, we can wait
		 * until the next context-switch before contemplating
		 * new requests. The GPU will be busy and we should be
		 * able to resubmit the new ELSP before it idles,
		 * avoiding pipeline bubbles (momentary pauses where
		 * the driver is unable to keep up the supply of new
		 * work). However, we have to double check that the
		 * priorities of the ports haven't been switched.
		 */
		if (port_count(&port[1]))
			return;

		/*
		 * WaIdleLiteRestore:bdw,skl
		 * Apply the wa NOOPs to prevent
		 * ring:HEAD == rq:TAIL as we resubmit the
		 * request. See gen8_emit_fini_breadcrumb() for
		 * where we prepare the padding after the
		 * end of the request.
		 */
		last->tail = last->wa_tail;
	}

	while ((rb = rb_first_cached(&execlists->queue))) {
		struct i915_priolist *p = to_priolist(rb);
		struct i915_request *rq, *rn;
		int i;

		priolist_for_each_request_consume(rq, rn, p, i) {
			/*
			 * Can we combine this request with the current port?
			 * It has to be the same context/ringbuffer and not
			 * have any exceptions (e.g. GVT saying never to
			 * combine contexts).
			 *
			 * If we can combine the requests, we can execute both
			 * by updating the RING_TAIL to point to the end of the
			 * second request, and so we never need to tell the
			 * hardware about the first.
			 */
			if (last && !can_merge_rq(last, rq)) {
				/*
				 * If we are on the second port and cannot
				 * combine this request with the last, then we
				 * are done.
				 */
				if (port == last_port)
					goto done;

				/*
				 * We must not populate both ELSP[] with the
				 * same LRCA, i.e. we must submit 2 different
				 * contexts if we submit 2 ELSP.
				 */
				if (last->hw_context == rq->hw_context)
					goto done;

				/*
				 * If GVT overrides us we only ever submit
				 * port[0], leaving port[1] empty. Note that we
				 * also have to be careful that we don't queue
				 * the same context (even though a different
				 * request) to the second port.
				 */
				if (ctx_single_port_submission(last->hw_context) ||
				    ctx_single_port_submission(rq->hw_context))
					goto done;


				if (submit)
					port_assign(port, last);
				port++;

				GEM_BUG_ON(port_isset(port));
			}

			list_del_init(&rq->sched.link);

			__i915_request_submit(rq);
			trace_i915_request_in(rq, port_index(port, execlists));

			last = rq;
			submit = true;
		}

		rb_erase_cached(&p->node, &execlists->queue);
		i915_priolist_free(p);
	}

done:
	/*
	 * Here be a bit of magic! Or sleight-of-hand, whichever you prefer.
	 *
	 * We choose the priority hint such that if we add a request of greater
	 * priority than this, we kick the submission tasklet to decide on
	 * the right order of submitting the requests to hardware. We must
	 * also be prepared to reorder requests as they are in-flight on the
	 * HW. We derive the priority hint then as the first "hole" in
	 * the HW submission ports and if there are no available slots,
	 * the priority of the lowest executing request, i.e. last.
	 *
	 * When we do receive a higher priority request ready to run from the
	 * user, see queue_request(), the priority hint is bumped to that
	 * request triggering preemption on the next dequeue (or subsequent
	 * interrupt for secondary ports).
	 */
	execlists->queue_priority_hint = queue_prio(execlists);

	if (submit) {
		port_assign(port, last);
		execlists_submit_ports(engine);
	}

	/* We must always keep the beast fed if we have work piled up */
	GEM_BUG_ON(rb_first_cached(&execlists->queue) &&
		   !port_isset(execlists->port));

	/* Re-evaluate the executing context setup after each preemptive kick */
	if (last)
		execlists_user_begin(execlists, execlists->port);

	/* If the engine is now idle, so should be the flag; and vice versa. */
	GEM_BUG_ON(execlists_is_active(&engine->execlists,
				       EXECLISTS_ACTIVE_USER) ==
		   !port_isset(engine->execlists.port));
}

void
execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
{
	struct execlist_port *port = execlists->port;
	unsigned int num_ports = execlists_num_ports(execlists);

	while (num_ports-- && port_isset(port)) {
		struct i915_request *rq = port_request(port);

		GEM_TRACE("%s:port%u fence %llx:%lld, (current %d)\n",
			  rq->engine->name,
			  (unsigned int)(port - execlists->port),
			  rq->fence.context, rq->fence.seqno,
			  hwsp_seqno(rq));

		GEM_BUG_ON(!execlists->active);
		execlists_context_schedule_out(rq,
					       i915_request_completed(rq) ?
					       INTEL_CONTEXT_SCHEDULE_OUT :
					       INTEL_CONTEXT_SCHEDULE_PREEMPTED);

		i915_request_put(rq);

		memset(port, 0, sizeof(*port));
		port++;
	}

	execlists_clear_all_active(execlists);
}

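/* Evict the CSB cachelines so that the next read picks up fresh entries */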
static inline void
invalidate_csb_entries(const u32 *first, const u32 *last)
{
	clflush((void *)first);
	clflush((void *)last);
}

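/*
 * The tasklet is disabled for the duration of a reset, so a disabled
 * tasklet is the signal that direct submission must be deferred until
 * the engine is restarted.
 */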
static inline bool
reset_in_progress(const struct intel_engine_execlists *execlists)
{
	return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
}

static void process_csb(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct execlist_port *port = execlists->port;
	const u32 * const buf = execlists->csb_status;
	const u8 num_entries = execlists->csb_size;
	u8 head, tail;

	lockdep_assert_held(&engine->timeline.lock);

	/*
	 * Note that csb_write, csb_status may be either in HWSP or mmio.
	 * When reading from the csb_write mmio register, we have to be
	 * careful to only use the GEN8_CSB_WRITE_PTR portion, which is
	 * the low 4bits. As it happens we know the next 4bits are always
	 * zero and so we can simply mask off the low u8 of the register
	 * and treat it identically to reading from the HWSP (without having
	 * to use explicit shifting and masking, and probably bifurcating
	 * the code to handle the legacy mmio read).
	 */
	head = execlists->csb_head;
	tail = READ_ONCE(*execlists->csb_write);
	GEM_TRACE("%s cs-irq head=%d, tail=%d\n", engine->name, head, tail);
	if (unlikely(head == tail))
		return;

	/*
	 * Hopefully paired with a wmb() in HW!
	 *
	 * We must complete the read of the write pointer before any reads
	 * from the CSB, so that we do not see stale values. Without an rmb
	 * (lfence) the HW may speculatively perform the CSB[] reads *before*
	 * we perform the READ_ONCE(*csb_write).
	 */
	rmb();

	do {
		struct i915_request *rq;
		unsigned int status;
		unsigned int count;

		if (++head == num_entries)
			head = 0;

		/*
		 * We are flying near dragons again.
		 *
		 * We hold a reference to the request in execlist_port[]
		 * but no more than that. We are operating in softirq
		 * context and so cannot hold any mutex or sleep. That
		 * prevents us stopping the requests we are processing
		 * in port[] from being retired simultaneously (the
		 * breadcrumb will be complete before we see the
		 * context-switch). As we only hold the reference to the
		 * request, any pointer chasing underneath the request
		 * is subject to a potential use-after-free. Thus we
		 * store all of the bookkeeping within port[] as
		 * required, and avoid using unguarded pointers beneath
		 * request itself. The same applies to the atomic
		 * status notifier.
		 */

		GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x, active=0x%x\n",
			  engine->name, head,
			  buf[2 * head + 0], buf[2 * head + 1],
			  execlists->active);

		status = buf[2 * head];
		if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE |
			      GEN8_CTX_STATUS_PREEMPTED))
			execlists_set_active(execlists,
					     EXECLISTS_ACTIVE_HWACK);
		if (status & GEN8_CTX_STATUS_ACTIVE_IDLE)
			execlists_clear_active(execlists,
					       EXECLISTS_ACTIVE_HWACK);

		if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
			continue;

		/* We should never get a COMPLETED | IDLE_ACTIVE! */
		GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE);

		if (status & GEN8_CTX_STATUS_COMPLETE &&
		    buf[2*head + 1] == execlists->preempt_complete_status) {
			GEM_TRACE("%s preempt-idle\n", engine->name);
			complete_preempt_context(execlists);
			continue;
		}

		if (status & GEN8_CTX_STATUS_PREEMPTED &&
		    execlists_is_active(execlists,
					EXECLISTS_ACTIVE_PREEMPT))
			continue;

		GEM_BUG_ON(!execlists_is_active(execlists,
						EXECLISTS_ACTIVE_USER));

		rq = port_unpack(port, &count);
		GEM_TRACE("%s out[0]: ctx=%d.%d, fence %llx:%lld (current %d), prio=%d\n",
			  engine->name,
			  port->context_id, count,
			  rq ? rq->fence.context : 0,
			  rq ? rq->fence.seqno : 0,
			  rq ? hwsp_seqno(rq) : 0,
			  rq ? rq_prio(rq) : 0);

		/* Check the context/desc id for this event matches */
		GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);

		GEM_BUG_ON(count == 0);
		if (--count == 0) {
			/*
			 * On the final event corresponding to the
			 * submission of this context, we expect either
			 * an element-switch event or a completion
			 * event (and on completion, the active-idle
			 * marker). No more preemptions, lite-restore
			 * or otherwise.
			 */
			GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
			GEM_BUG_ON(port_isset(&port[1]) &&
				   !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH));
			GEM_BUG_ON(!port_isset(&port[1]) &&
				   !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));

			/*
			 * We rely on the hardware being strongly
			 * ordered, that the breadcrumb write is
			 * coherent (visible from the CPU) before the
			 * user interrupt and CSB is processed.
			 */
			GEM_BUG_ON(!i915_request_completed(rq));

			execlists_context_schedule_out(rq,
						       INTEL_CONTEXT_SCHEDULE_OUT);
			i915_request_put(rq);

			GEM_TRACE("%s completed ctx=%d\n",
				  engine->name, port->context_id);

			port = execlists_port_complete(execlists, port);
			if (port_isset(port))
				execlists_user_begin(execlists, port);
			else
				execlists_user_end(execlists);
		} else {
			port_set(port, port_pack(rq, count));
		}
	} while (head != tail);

	execlists->csb_head = head;

	/*
	 * Gen11 has proven to fail wrt global observation point between
	 * entry and tail update, failing on the ordering and thus
	 * we see an old entry in the context status buffer.
	 *
	 * Forcibly evict the entries for the next gpu csb update,
	 * to increase the odds that we get fresh entries with non-working
	 * hardware. The cost for doing so comes out mostly in the wash
	 * as hardware, working or not, will need to do the
	 * invalidation before.
	 */
	invalidate_csb_entries(&buf[0], &buf[num_entries - 1]);
}

static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
{
	lockdep_assert_held(&engine->timeline.lock);

	process_csb(engine);
	if (!execlists_is_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT))
		execlists_dequeue(engine);
}

/*
 * Check the unread Context Status Buffers and manage the submission of new
 * contexts to the ELSP accordingly.
 */
static void execlists_submission_tasklet(unsigned long data)
{
	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
	unsigned long flags;

	GEM_TRACE("%s awake?=%d, active=%x\n",
		  engine->name,
		  !!intel_wakeref_active(&engine->wakeref),
		  engine->execlists.active);

	spin_lock_irqsave(&engine->timeline.lock, flags);
	__execlists_submission_tasklet(engine);
	spin_unlock_irqrestore(&engine->timeline.lock, flags);
}

static void queue_request(struct intel_engine_cs *engine,
			  struct i915_sched_node *node,
			  int prio)
{
	list_add_tail(&node->link, i915_sched_lookup_priolist(engine, prio));
}

static void __submit_queue_imm(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	if (reset_in_progress(execlists))
		return; /* defer until we restart the engine following reset */

	if (execlists->tasklet.func == execlists_submission_tasklet)
		__execlists_submission_tasklet(engine);
	else
		tasklet_hi_schedule(&execlists->tasklet);
}

static void submit_queue(struct intel_engine_cs *engine, int prio)
{
	if (prio > engine->execlists.queue_priority_hint) {
		engine->execlists.queue_priority_hint = prio;
		__submit_queue_imm(engine);
	}
}

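/*
 * Submission entry point from the request machinery: queue the request
 * on the engine's priority tree and kick the submission tasklet.
 */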
static void execlists_submit_request(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->timeline.lock, flags);

	queue_request(engine, &request->sched, rq_prio(request));

	GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
	GEM_BUG_ON(list_empty(&request->sched.link));

	submit_queue(engine, rq_prio(request));

	spin_unlock_irqrestore(&engine->timeline.lock, flags);
}

static void __execlists_context_fini(struct intel_context *ce)
{
	intel_ring_put(ce->ring);

	GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
	i915_gem_object_put(ce->state->obj);
}

static void execlists_context_destroy(struct kref *kref)
{
	struct intel_context *ce = container_of(kref, typeof(*ce), ref);

	GEM_BUG_ON(intel_context_is_pinned(ce));

	if (ce->state)
		__execlists_context_fini(ce);

	intel_context_free(ce);
}

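/*
 * Pin the context state object into the GGTT, high and above the pin
 * bias, and mark the backing storage as in use and dirty.
 */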
static int __context_pin(struct i915_vma *vma)
{
	unsigned int flags;
	int err;

	flags = PIN_GLOBAL | PIN_HIGH;
	flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		return err;

	vma->obj->pin_global++;
	vma->obj->mm.dirty = true;

	return 0;
}

static void __context_unpin(struct i915_vma *vma)
{
	vma->obj->pin_global--;
	__i915_vma_unpin(vma);
}

static void execlists_context_unpin(struct intel_context *ce)
{
	struct intel_engine_cs *engine;

	/*
	 * The tasklet may still be using a pointer to our state, via an
	 * old request. However, since we know we only unpin the context
	 * on retirement of the following request, we know that the last
	 * request referencing us will have had a completion CS interrupt.
	 * If we see that it is still active, it means that the tasklet hasn't
	 * had the chance to run yet; let it run before we teardown the
	 * reference it may use.
	 */
	engine = READ_ONCE(ce->active);
	if (unlikely(engine)) {
		unsigned long flags;

		spin_lock_irqsave(&engine->timeline.lock, flags);
		process_csb(engine);
		spin_unlock_irqrestore(&engine->timeline.lock, flags);

		GEM_BUG_ON(READ_ONCE(ce->active));
	}

	i915_gem_context_unpin_hw_id(ce->gem_context);

	intel_ring_unpin(ce->ring);

	i915_gem_object_unpin_map(ce->state->obj);
	__context_unpin(ce->state);
}

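/*
 * Refresh the ring registers (start, head, tail) and, for the render
 * class, the RPCS value stored in the context image.
 */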
static void
__execlists_update_reg_state(struct intel_context *ce,
			     struct intel_engine_cs *engine)
{
	struct intel_ring *ring = ce->ring;
	u32 *regs = ce->lrc_reg_state;

	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));

	regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(ring->vma);
	regs[CTX_RING_HEAD + 1] = ring->head;
	regs[CTX_RING_TAIL + 1] = ring->tail;

	/* RPCS */
	if (engine->class == RENDER_CLASS)
		regs[CTX_R_PWR_CLK_STATE + 1] =
			intel_sseu_make_rpcs(engine->i915, &ce->sseu);
}

static int
__execlists_context_pin(struct intel_context *ce,
			struct intel_engine_cs *engine)
{
	void *vaddr;
	int ret;

	GEM_BUG_ON(!ce->gem_context->ppgtt);

	ret = execlists_context_deferred_alloc(ce, engine);
	if (ret)
		goto err;
	GEM_BUG_ON(!ce->state);

	ret = __context_pin(ce->state);
	if (ret)
		goto err;

	vaddr = i915_gem_object_pin_map(ce->state->obj,
					i915_coherent_map_type(engine->i915) |
					I915_MAP_OVERRIDE);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto unpin_vma;
	}

	ret = intel_ring_pin(ce->ring);
	if (ret)
		goto unpin_map;

	ret = i915_gem_context_pin_hw_id(ce->gem_context);
	if (ret)
		goto unpin_ring;

	ce->lrc_desc = lrc_descriptor(ce, engine);
	ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
	__execlists_update_reg_state(ce, engine);

	return 0;

unpin_ring:
	intel_ring_unpin(ce->ring);
unpin_map:
	i915_gem_object_unpin_map(ce->state->obj);
unpin_vma:
	__context_unpin(ce->state);
err:
	return ret;
}

static int execlists_context_pin(struct intel_context *ce)
{
	return __execlists_context_pin(ce, ce->engine);
}

static void execlists_context_reset(struct intel_context *ce)
{
	/*
	 * Because we emit WA_TAIL_DWORDS there may be a disparity
	 * between our bookkeeping in ce->ring->head and ce->ring->tail and
	 * that stored in context. As we only write new commands from
	 * ce->ring->tail onwards, everything before that is junk. If the GPU
	 * starts reading from its RING_HEAD from the context, it may try to
	 * execute that junk and die.
	 *
	 * The contexts that are still pinned on resume belong to the
	 * kernel, and are local to each engine. All other contexts will
	 * have their head/tail sanitized upon pinning before use, so they
	 * will never see garbage.
	 *
	 * So to avoid that we reset the context images upon resume. For
	 * simplicity, we just zero everything out.
	 */
	intel_ring_reset(ce->ring, 0);
	__execlists_update_reg_state(ce, ce->engine);
}

static const struct intel_context_ops execlists_context_ops = {
	.pin = execlists_context_pin,
	.unpin = execlists_context_unpin,

	.enter = intel_context_enter_engine,
	.exit = intel_context_exit_engine,

	.reset = execlists_context_reset,
	.destroy = execlists_context_destroy,
};

static int gen8_emit_init_breadcrumb(struct i915_request *rq)
{
	u32 *cs;

	GEM_BUG_ON(!rq->timeline->has_initial_breadcrumb);

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * Check if we have been preempted before we even get started.
	 *
	 * After this point i915_request_started() reports true, even if
	 * we get preempted and so are no longer running.
	 */
	*cs++ = MI_ARB_CHECK;
	*cs++ = MI_NOOP;

	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
	*cs++ = rq->timeline->hwsp_offset;
	*cs++ = 0;
	*cs++ = rq->fence.seqno - 1;

	intel_ring_advance(rq, cs);

	/* Record the updated position of the request's payload */
	rq->infix = intel_ring_offset(rq, cs);

	return 0;
}

static int emit_pdps(struct i915_request *rq)
{
	const struct intel_engine_cs * const engine = rq->engine;
	struct i915_hw_ppgtt * const ppgtt = rq->gem_context->ppgtt;
	int err, i;
	u32 *cs;

	GEM_BUG_ON(intel_vgpu_active(rq->i915));

	/*
	 * Beware ye of the dragons, this sequence is magic!
	 *
	 * Small changes to this sequence can cause anything from
	 * GPU hangs to forcewake errors and machine lockups!
	 */

	/* Flush any residual operations from the context load */
	err = engine->emit_flush(rq, EMIT_FLUSH);
	if (err)
		return err;

	/* Magic required to prevent forcewake errors! */
	err = engine->emit_flush(rq, EMIT_INVALIDATE);
	if (err)
		return err;

	cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Ensure the LRI have landed before we invalidate & continue */
	*cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
	for (i = GEN8_3LVL_PDPES; i--; ) {
		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
		u32 base = engine->mmio_base;

		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
		*cs++ = upper_32_bits(pd_daddr);
		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
		*cs++ = lower_32_bits(pd_daddr);
	}
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	/* Be doubly sure the LRI have landed before proceeding */
	err = engine->emit_flush(rq, EMIT_FLUSH);
	if (err)
		return err;

	/* Re-invalidate the TLB for luck */
	return engine->emit_flush(rq, EMIT_INVALIDATE);
}

static int execlists_request_alloc(struct i915_request *request)
{
	int ret;

	GEM_BUG_ON(!intel_context_is_pinned(request->hw_context));

	/*
	 * Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += EXECLISTS_REQUEST_SIZE;

	/*
	 * Note that after this point, we have committed to using
	 * this request as it is being used to both track the
	 * state of engine initialisation and liveness of the
	 * golden renderstate above. Think twice before you try
	 * to cancel/unwind this request now.
	 */

	/* Unconditionally invalidate GPU caches and TLBs. */
	if (i915_vm_is_4lvl(&request->gem_context->ppgtt->vm))
		ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
	else
		ret = emit_pdps(request);
	if (ret)
		return ret;

	request->reserved_space -= EXECLISTS_REQUEST_SIZE;
	return 0;
}

/*
 * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after
 * PIPE_CONTROL instruction. This is required for the flush to happen correctly
 * but there is a slight complication as this is applied in WA batch where the
 * values are only initialized once so we cannot take register value at the
 * beginning and reuse it further; hence we save its value to memory, upload a
 * constant value with bit21 set and then we restore it back with the saved value.
 * To simplify the WA, a constant value is formed by using the default value
 * of this register. This shouldn't be a problem because we are only modifying
 * it for a short period and this batch is non-preemptible. We can of course
 * use additional instructions that read the actual value of the register
 * at that time and set our bit of interest but it makes the WA complicated.
 *
 * This WA is also required for Gen9 so extracting as a function avoids
 * code duplication.
 */
static u32 *
gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
{
	/* NB no one else is allowed to scribble over scratch + 256! */
	*batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
	*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
	*batch++ = i915_scratch_offset(engine->i915) + 256;
	*batch++ = 0;

	*batch++ = MI_LOAD_REGISTER_IMM(1);
	*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
	*batch++ = 0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES;

	batch = gen8_emit_pipe_control(batch,
				       PIPE_CONTROL_CS_STALL |
				       PIPE_CONTROL_DC_FLUSH_ENABLE,
				       0);

	*batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
	*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
	*batch++ = i915_scratch_offset(engine->i915) + 256;
	*batch++ = 0;

	return batch;
}

/*
 * Typically we only have one indirect_ctx and per_ctx batch buffer which are
 * initialized at the beginning and shared across all contexts but this field
 * helps us to have multiple batches at different offsets and select them based
 * on some criteria. At the moment this batch always starts at the beginning of the page
 * and at this point we don't have multiple wa_ctx batch buffers.
 *
 * The number of WAs applied is not known at the beginning; we use this field
 * to return the number of DWORDS written.
 *
 * It is to be noted that this batch does not contain MI_BATCH_BUFFER_END
 * so it adds NOOPs as padding to make it cacheline aligned.
 * MI_BATCH_BUFFER_END will be added to perctx batch and both of them together
 * make a complete batch buffer.
 */
static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
{
	/* WaDisableCtxRestoreArbitration:bdw,chv */
	*batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;

	/* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
	if (IS_BROADWELL(engine->i915))
		batch = gen8_emit_flush_coherentl3_wa(engine, batch);

	/* WaClearSlmSpaceAtContextSwitch:bdw,chv */
	/* Actual scratch location is at 128 bytes offset */
	batch = gen8_emit_pipe_control(batch,
				       PIPE_CONTROL_FLUSH_L3 |
				       PIPE_CONTROL_GLOBAL_GTT_IVB |
				       PIPE_CONTROL_CS_STALL |
				       PIPE_CONTROL_QW_WRITE,
				       i915_scratch_offset(engine->i915) +
				       2 * CACHELINE_BYTES);

	*batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;

	/* Pad to end of cacheline */
	while ((unsigned long)batch % CACHELINE_BYTES)
		*batch++ = MI_NOOP;

	/*
	 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
	 * execution depends on the length specified in terms of cache lines
	 * in the register CTX_RCS_INDIRECT_CTX
	 */

	return batch;
}

struct lri {
	i915_reg_t reg;
	u32 value;
};

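/*
 * Emit a single MI_LOAD_REGISTER_IMM covering up to 63 register/value
 * pairs into the workaround batch.
 */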
static u32 *emit_lri(u32 *batch, const struct lri *lri, unsigned int count)
{
	GEM_BUG_ON(!count || count > 63);

	*batch++ = MI_LOAD_REGISTER_IMM(count);
	do {
		*batch++ = i915_mmio_reg_offset(lri->reg);
		*batch++ = lri->value;
	} while (lri++, --count);
	*batch++ = MI_NOOP;

	return batch;
}

static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
{
	static const struct lri lri[] = {
		/* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
		{
			COMMON_SLICE_CHICKEN2,
			__MASKED_FIELD(GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE,
				       0),
		},

		/* BSpec: 11391 */
		{
			FF_SLICE_CHICKEN,
			__MASKED_FIELD(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX,
				       FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX),
		},

		/* BSpec: 11299 */
		{
			_3D_CHICKEN3,
			__MASKED_FIELD(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX,
				       _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX),
		}
	};

	*batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;

	/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
	batch = gen8_emit_flush_coherentl3_wa(engine, batch);

	batch = emit_lri(batch, lri, ARRAY_SIZE(lri));

	/* WaMediaPoolStateCmdInWABB:bxt,glk */
	if (HAS_POOLED_EU(engine->i915)) {
		/*
		 * EU pool configuration is setup along with golden context
		 * during context initialization. This value depends on
		 * device type (2x6 or 3x6) and needs to be updated based
		 * on which subslice is disabled especially for 2x6
		 * devices, however it is safe to load default
		 * configuration of 3x6 device instead of masking off
		 * corresponding bits because HW ignores bits of a disabled
		 * subslice and drops down to appropriate config. Please
		 * see render_state_setup() in i915_gem_render_state.c for
		 * possible configurations, to avoid duplication they are
		 * not shown here again.
		 */
1565 1566 1567 1568 1569 1570
		*batch++ = GEN9_MEDIA_POOL_STATE;
		*batch++ = GEN9_MEDIA_POOL_ENABLE;
		*batch++ = 0x00777000;
		*batch++ = 0;
		*batch++ = 0;
		*batch++ = 0;
1571 1572
	}

C
Chris Wilson 已提交
1573 1574
	*batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;

1575
	/* Pad to end of cacheline */
1576 1577
	while ((unsigned long)batch % CACHELINE_BYTES)
		*batch++ = MI_NOOP;
1578

1579
	return batch;
1580 1581
}

static u32 *
gen10_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
{
	int i;

	/*
	 * WaPipeControlBefore3DStateSamplePattern: cnl
	 *
	 * Ensure the engine is idle prior to programming a
	 * 3DSTATE_SAMPLE_PATTERN during a context restore.
	 */
	batch = gen8_emit_pipe_control(batch,
				       PIPE_CONTROL_CS_STALL,
				       0);
	/*
	 * WaPipeControlBefore3DStateSamplePattern says we need 4 dwords for
	 * the PIPE_CONTROL followed by 12 dwords of 0x0, so 16 dwords in
	 * total. However, a PIPE_CONTROL is 6 dwords long, not 4, which is
	 * confusing. Since gen8_emit_pipe_control() already advances the
	 * batch by 6 dwords, we advance the other 10 here, completing a
	 * cacheline. It's not clear if the workaround requires this padding
	 * before other commands, or if it's just the regular padding we would
	 * already have for the workaround bb, so leave it here for now.
	 */
	for (i = 0; i < 10; i++)
		*batch++ = MI_NOOP;

	/* Pad to end of cacheline */
	while ((unsigned long)batch % CACHELINE_BYTES)
		*batch++ = MI_NOOP;

	return batch;
}

#define CTX_WA_BB_OBJ_SIZE (PAGE_SIZE)

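/*
 * Allocate a single page, pinned high in the global GTT, to hold the
 * per-engine workaround batch buffers (indirect_ctx and per_ctx).
 */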
static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create(engine->i915, CTX_WA_BB_OBJ_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (err)
		goto err;

	engine->wa_ctx.vma = vma;
	return 0;

err:
	i915_gem_object_put(obj);
	return err;
}

static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine)
{
	i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
}

typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch);

static int intel_init_workaround_bb(struct intel_engine_cs *engine)
{
	struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
	struct i915_wa_ctx_bb *wa_bb[2] = { &wa_ctx->indirect_ctx,
					    &wa_ctx->per_ctx };
	wa_bb_func_t wa_bb_fn[2];
	struct page *page;
	void *batch, *batch_ptr;
	unsigned int i;
	int ret;

	if (engine->class != RENDER_CLASS)
		return 0;

	switch (INTEL_GEN(engine->i915)) {
	case 11:
		return 0;
	case 10:
		wa_bb_fn[0] = gen10_init_indirectctx_bb;
		wa_bb_fn[1] = NULL;
		break;
	case 9:
		wa_bb_fn[0] = gen9_init_indirectctx_bb;
		wa_bb_fn[1] = NULL;
		break;
	case 8:
		wa_bb_fn[0] = gen8_init_indirectctx_bb;
		wa_bb_fn[1] = NULL;
		break;
	default:
		MISSING_CASE(INTEL_GEN(engine->i915));
		return 0;
	}

	ret = lrc_setup_wa_ctx(engine);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
		return ret;
	}

	page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0);
	batch = batch_ptr = kmap_atomic(page);

	/*
	 * Emit the two workaround batch buffers, recording the offset from the
	 * start of the workaround batch buffer object for each and their
	 * respective sizes.
	 */
	for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) {
		wa_bb[i]->offset = batch_ptr - batch;
		if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
						  CACHELINE_BYTES))) {
			ret = -EINVAL;
			break;
		}
		if (wa_bb_fn[i])
			batch_ptr = wa_bb_fn[i](engine, batch_ptr);
		wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset);
	}

	BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE);

	kunmap_atomic(batch);
	if (ret)
		lrc_destroy_wa_ctx(engine);

	return ret;
}

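/*
 * Program the engine for execlist submission: write HWSTAM, select execlist
 * mode in RING_MODE (GFX_RUN_LIST_ENABLE, or GEN11_GFX_DISABLE_LEGACY_MODE on
 * gen11+), clear STOP_RING and point RING_HWS_PGA at the engine's status page.
 */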
static void enable_execlists(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */

	if (INTEL_GEN(dev_priv) >= 11)
		I915_WRITE(RING_MODE_GEN7(engine),
			   _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
	else
		I915_WRITE(RING_MODE_GEN7(engine),
			   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));

	I915_WRITE(RING_MI_MODE(engine->mmio_base),
		   _MASKED_BIT_DISABLE(STOP_RING));

	I915_WRITE(RING_HWS_PGA(engine->mmio_base),
		   i915_ggtt_offset(engine->status_page.vma));
	POSTING_READ(RING_HWS_PGA(engine->mmio_base));
}

static bool unexpected_starting_state(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	bool unexpected = false;

	if (I915_READ(RING_MI_MODE(engine->mmio_base)) & STOP_RING) {
		DRM_DEBUG_DRIVER("STOP_RING still set in RING_MI_MODE\n");
		unexpected = true;
	}

	return unexpected;
}

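/*
 * Bring the engine back up for execlist submission: reapply the engine
 * workarounds and register whitelist, reprogram the MOCS tables, reset the
 * breadcrumbs and switch the engine back into execlist mode.
 */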
static int execlists_resume(struct intel_engine_cs *engine)
{
	intel_engine_apply_workarounds(engine);
	intel_engine_apply_whitelist(engine);

	intel_mocs_init_engine(engine);

	intel_engine_reset_breadcrumbs(engine);

	if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) {
		struct drm_printer p = drm_debug_printer(__func__);

		intel_engine_dump(engine, &p, NULL);
	}

	enable_execlists(engine);

	return 0;
}

static void execlists_reset_prepare(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	unsigned long flags;

	GEM_TRACE("%s: depth<-%d\n", engine->name,
		  atomic_read(&execlists->tasklet.count));

	/*
	 * Prevent request submission to the hardware until we have
	 * completed the reset in i915_gem_reset_finish(). If a request
	 * is completed by one engine, it may then queue a request
	 * to a second via its execlists->tasklet *just* as we are
	 * calling engine->resume() and also writing the ELSP.
	 * Turning off the execlists->tasklet until the reset is over
	 * prevents the race.
	 */
	__tasklet_disable_sync_once(&execlists->tasklet);
	GEM_BUG_ON(!reset_in_progress(execlists));

	intel_engine_stop_cs(engine);

	/* And flush any current direct submission. */
	spin_lock_irqsave(&engine->timeline.lock, flags);
	spin_unlock_irqrestore(&engine->timeline.lock, flags);
}

static bool lrc_regs_ok(const struct i915_request *rq)
{
	const struct intel_ring *ring = rq->ring;
	const u32 *regs = rq->hw_context->lrc_reg_state;

	/* Quick spot check for the common signs of context corruption */

	if (regs[CTX_RING_BUFFER_CONTROL + 1] !=
	    (RING_CTL_SIZE(ring->size) | RING_VALID))
		return false;

	if (regs[CTX_RING_BUFFER_START + 1] != i915_ggtt_offset(ring->vma))
		return false;

	return true;
}

static void reset_csb_pointers(struct intel_engine_execlists *execlists)
{
	const unsigned int reset_value = execlists->csb_size - 1;

	/*
	 * After a reset, the HW starts writing into CSB entry [0]. We
	 * therefore have to set our HEAD pointer back one entry so that
	 * the *first* entry we check is entry 0. To complicate this further,
	 * as we don't wait for the first interrupt after reset, we have to
	 * fake the HW write to point back to the last entry so that our
	 * inline comparison of our cached head position against the last HW
	 * write works even before the first interrupt.
	 */
	execlists->csb_head = reset_value;
	WRITE_ONCE(*execlists->csb_write, reset_value);
	wmb(); /* Make sure this is visible to HW (paranoia?) */

	invalidate_csb_entries(&execlists->csb_status[0],
			       &execlists->csb_status[reset_value]);
}

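/*
 * Common per-engine reset path: drain any pending CSB events, rewind the CSB
 * pointers, then decide what to do with the context that was executing when
 * the reset struck. If its image looks intact the request is replayed;
 * otherwise the register state is scrubbed back to the defaults so that just
 * the breadcrumb update runs.
 */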
static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct intel_context *ce;
	struct i915_request *rq;
	u32 *regs;

	process_csb(engine); /* drain preemption events */

	/* Following the reset, we need to reload the CSB read/write pointers */
	reset_csb_pointers(&engine->execlists);

	/*
	 * Save the currently executing context, even if we completed
	 * its request, it was still running at the time of the
	 * reset and will have been clobbered.
	 */
	if (!port_isset(execlists->port))
		goto out_clear;

	ce = port_request(execlists->port)->hw_context;

	/*
	 * Catch up with any missed context-switch interrupts.
	 *
	 * Ideally we would just read the remaining CSB entries now that we
	 * know the gpu is idle. However, the CSB registers are sometimes^W
	 * often trashed across a GPU reset! Instead we have to rely on
	 * guessing the missed context-switch events by looking at what
	 * requests were completed.
	 */
	execlists_cancel_port_requests(execlists);

	/* Push back any incomplete requests for replay after the reset. */
	rq = __unwind_incomplete_requests(engine);
	if (!rq)
		goto out_replay;

	if (rq->hw_context != ce) { /* caught just before a CS event */
		rq = NULL;
		goto out_replay;
	}

	/*
	 * If this request hasn't started yet, e.g. it is waiting on a
	 * semaphore, we need to avoid skipping the request or else we
	 * break the signaling chain. However, if the context is corrupt
	 * the request will not restart and we will be stuck with a wedged
	 * device. It is quite often the case that if we issue a reset
	 * while the GPU is loading the context image, that the context
	 * image becomes corrupt.
	 *
	 * Otherwise, if we have not started yet, the request should replay
	 * perfectly and we do not need to flag the result as being erroneous.
	 */
	if (!i915_request_started(rq) && lrc_regs_ok(rq))
		goto out_replay;

	/*
	 * If the request was innocent, we leave the request in the ELSP
	 * and will try to replay it on restarting. The context image may
	 * have been corrupted by the reset, in which case we may have
	 * to service a new GPU hang, but more likely we can continue on
	 * without impact.
	 *
	 * If the request was guilty, we presume the context is corrupt
	 * and have to at least restore the RING register in the context
	 * image back to the expected values to skip over the guilty request.
	 */
	i915_reset_request(rq, stalled);
	if (!stalled && lrc_regs_ok(rq))
		goto out_replay;

	/*
	 * We want a simple context + ring to execute the breadcrumb update.
	 * We cannot rely on the context being intact across the GPU hang,
	 * so clear it and rebuild just what we need for the breadcrumb.
	 * All pending requests for this context will be zapped, and any
	 * future request will be after userspace has had the opportunity
	 * to recreate its own state.
	 */
	regs = ce->lrc_reg_state;
	if (engine->pinned_default_state) {
		memcpy(regs, /* skip restoring the vanilla PPHWSP */
		       engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
		       engine->context_size - PAGE_SIZE);
	}
	execlists_init_reg_state(regs, ce, engine, ce->ring);

	/* Rerun the request; its payload has been neutered (if guilty). */
out_replay:
	ce->ring->head =
		rq ? intel_ring_wrap(ce->ring, rq->head) : ce->ring->tail;
	intel_ring_update_space(ce->ring);
	__execlists_update_reg_state(ce, engine);

out_clear:
	execlists_clear_all_active(execlists);
}

static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
{
	unsigned long flags;

	GEM_TRACE("%s\n", engine->name);

	spin_lock_irqsave(&engine->timeline.lock, flags);

	__execlists_reset(engine, stalled);

	spin_unlock_irqrestore(&engine->timeline.lock, flags);
}

static void nop_submission_tasklet(unsigned long data)
{
	/* The driver is wedged; don't process any more events. */
}

static void execlists_cancel_requests(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request *rq, *rn;
	struct rb_node *rb;
	unsigned long flags;

	GEM_TRACE("%s\n", engine->name);

	/*
	 * Before we call engine->cancel_requests(), we should have exclusive
	 * access to the submission state. This is arranged for us by the
	 * caller disabling the interrupt generation, the tasklet and other
	 * threads that may then access the same state, giving us a free hand
	 * to reset state. However, we still need to let lockdep be aware that
	 * we know this state may be accessed in hardirq context, so we
	 * disable the irq around this manipulation and we want to keep
	 * the spinlock focused on its duties and not accidentally conflate
	 * coverage to the submission's irq state. (Similarly, although we
	 * shouldn't need to disable irq around the manipulation of the
	 * submission's irq state, we also wish to remind ourselves that
	 * it is irq state.)
	 */
	spin_lock_irqsave(&engine->timeline.lock, flags);

	__execlists_reset(engine, true);

	/* Mark all executing requests as skipped. */
	list_for_each_entry(rq, &engine->timeline.requests, link) {
		if (!i915_request_signaled(rq))
			dma_fence_set_error(&rq->fence, -EIO);

		i915_request_mark_complete(rq);
	}

	/* Flush the queued requests to the timeline list (for retiring). */
	while ((rb = rb_first_cached(&execlists->queue))) {
		struct i915_priolist *p = to_priolist(rb);
		int i;

		priolist_for_each_request_consume(rq, rn, p, i) {
			list_del_init(&rq->sched.link);
			__i915_request_submit(rq);
			dma_fence_set_error(&rq->fence, -EIO);
			i915_request_mark_complete(rq);
		}

		rb_erase_cached(&p->node, &execlists->queue);
		i915_priolist_free(p);
	}

	/* Remaining _unready_ requests will be nop'ed when submitted */

	execlists->queue_priority_hint = INT_MIN;
	execlists->queue = RB_ROOT_CACHED;
	GEM_BUG_ON(port_isset(execlists->port));

	GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
	execlists->tasklet.func = nop_submission_tasklet;

	spin_unlock_irqrestore(&engine->timeline.lock, flags);
}

static void execlists_reset_finish(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	/*
	 * After a GPU reset, we may have requests to replay. Do so now while
	 * we still have the forcewake to be sure that the GPU is not allowed
	 * to sleep before we restart and reload a context.
	 */
	GEM_BUG_ON(!reset_in_progress(execlists));
	if (!RB_EMPTY_ROOT(&execlists->queue.rb_root))
		execlists->tasklet.func(execlists->tasklet.data);

	if (__tasklet_enable(&execlists->tasklet))
		/* And kick in case we missed a new request submission. */
		tasklet_hi_schedule(&execlists->tasklet);
	GEM_TRACE("%s: depth->%d\n", engine->name,
		  atomic_read(&execlists->tasklet.count));
}

static int gen8_emit_bb_start(struct i915_request *rq,
			      u64 offset, u32 len,
			      const unsigned int flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * WaDisableCtxRestoreArbitration:bdw,chv
	 *
	 * We would not need to perform MI_ARB_ENABLE as often as we do (in
	 * particular on all the gens that do not need the w/a at all!) if we
	 * took care to make sure that on every switch into this context
	 * (both ordinary and for preemption) arbitration was enabled.
	 * However, for gen8 there is another w/a that requires us not to
	 * preempt inside GPGPU execution, so we keep arbitration disabled
	 * for gen8 batches. Arbitration will be re-enabled before we close
	 * the request (engine->emit_fini_breadcrumb).
	 */
	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;

	/* FIXME(BDW+): Address space and security selectors. */
	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);

	intel_ring_advance(rq, cs);

	return 0;
}

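/*
 * Gen9+ does not need the gen8 GPGPU preemption w/a above, so arbitration is
 * enabled around the batch (allowing it to be preempted) and disabled again
 * afterwards; the closing breadcrumb re-enables it.
 */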
static int gen9_emit_bb_start(struct i915_request *rq,
			      u64 offset, u32 len,
			      const unsigned int flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;

	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	return 0;
}

static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
{
	ENGINE_WRITE(engine, RING_IMR,
		     ~(engine->irq_enable_mask | engine->irq_keep_mask));
	ENGINE_POSTING_READ(engine, RING_IMR);
}

static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
{
	ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
}

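/*
 * Flush for the non-render engines: a single MI_FLUSH_DW with a post-sync
 * write (see the command-barrier comment below), plus TLB invalidation and,
 * on the video decode engines, BSD cache invalidation for EMIT_INVALIDATE.
 */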
static int gen8_emit_flush(struct i915_request *request, u32 mode)
{
	u32 cmd, *cs;

	cs = intel_ring_begin(request, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	cmd = MI_FLUSH_DW + 1;

	/* We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	if (mode & EMIT_INVALIDATE) {
		cmd |= MI_INVALIDATE_TLB;
		if (request->engine->class == VIDEO_DECODE_CLASS)
			cmd |= MI_INVALIDATE_BSD;
	}

	*cs++ = cmd;
	*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
	*cs++ = 0; /* upper addr */
	*cs++ = 0; /* value */
	intel_ring_advance(request, cs);

	return 0;
}

static int gen8_emit_flush_render(struct i915_request *request,
				  u32 mode)
{
	struct intel_engine_cs *engine = request->engine;
	u32 scratch_addr =
		i915_scratch_offset(engine->i915) + 2 * CACHELINE_BYTES;
	bool vf_flush_wa = false, dc_flush_wa = false;
	u32 *cs, flags = 0;
	int len;

	flags |= PIPE_CONTROL_CS_STALL;

	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}

	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		/*
		 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
		 * pipe control.
		 */
		if (IS_GEN(request->i915, 9))
			vf_flush_wa = true;

		/* WaForGAMHang:kbl */
		if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0))
			dc_flush_wa = true;
	}

	len = 6;

	if (vf_flush_wa)
		len += 6;

	if (dc_flush_wa)
		len += 12;

	cs = intel_ring_begin(request, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	if (vf_flush_wa)
		cs = gen8_emit_pipe_control(cs, 0, 0);

	if (dc_flush_wa)
		cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE,
					    0);

	cs = gen8_emit_pipe_control(cs, flags, scratch_addr);

	if (dc_flush_wa)
		cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0);

	intel_ring_advance(request, cs);

	return 0;
}

/*
 * Reserve space for 2 NOOPs at the end of each request to be
 * used as a workaround for not being allowed to do lite
 * restore with HEAD==TAIL (WaIdleLiteRestore).
 */
static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs)
{
	/* Ensure there's always at least one preemption point per-request. */
	*cs++ = MI_ARB_CHECK;
	*cs++ = MI_NOOP;
	request->wa_tail = intel_ring_offset(request, cs);

	return cs;
}

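/*
 * Close a request: write the fence seqno to the request's slot in the
 * timeline HWSP via a GGTT write, raise MI_USER_INTERRUPT, re-enable
 * arbitration and append the WaIdleLiteRestore tail above.
 */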
static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
{
	cs = gen8_emit_ggtt_write(cs,
				  request->fence.seqno,
				  request->timeline->hwsp_offset,
				  0);

	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;

	request->tail = intel_ring_offset(request, cs);
	assert_ring_tail_valid(request->ring, request->tail);

	return gen8_emit_wa_tail(request, cs);
}

static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
{
	/* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
	cs = gen8_emit_ggtt_write_rcs(cs,
				      request->fence.seqno,
				      request->timeline->hwsp_offset,
				      PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
				      PIPE_CONTROL_DEPTH_CACHE_FLUSH |
				      PIPE_CONTROL_DC_FLUSH_ENABLE);
	cs = gen8_emit_pipe_control(cs,
				    PIPE_CONTROL_FLUSH_ENABLE |
				    PIPE_CONTROL_CS_STALL,
				    0);

	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;

	request->tail = intel_ring_offset(request, cs);
	assert_ring_tail_valid(request->ring, request->tail);

	return gen8_emit_wa_tail(request, cs);
}

static int gen8_init_rcs_context(struct i915_request *rq)
{
	int ret;

	ret = intel_engine_emit_ctx_wa(rq);
	if (ret)
		return ret;

	ret = intel_rcs_context_init_mocs(rq);
	/*
	 * Failing to program the MOCS is non-fatal. The system will not
	 * run at peak performance. So generate an error and carry on.
	 */
	if (ret)
		DRM_ERROR("MOCS failed to program: expect performance issues.\n");

	return i915_gem_render_state_emit(rq);
}

static void execlists_park(struct intel_engine_cs *engine)
{
	intel_engine_park(engine);
}

void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = execlists_submit_request;
	engine->cancel_requests = execlists_cancel_requests;
	engine->schedule = i915_schedule;
	engine->execlists.tasklet.func = execlists_submission_tasklet;

	engine->reset.prepare = execlists_reset_prepare;
	engine->reset.reset = execlists_reset;
	engine->reset.finish = execlists_reset_finish;

	engine->park = execlists_park;
	engine->unpark = NULL;

	engine->flags |= I915_ENGINE_SUPPORTS_STATS;
	if (!intel_vgpu_active(engine->i915))
		engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
	if (engine->preempt_context &&
	    HAS_LOGICAL_RING_PREEMPTION(engine->i915))
		engine->flags |= I915_ENGINE_HAS_PREEMPTION;
}

static void execlists_destroy(struct intel_engine_cs *engine)
{
	intel_engine_cleanup_common(engine);
	lrc_destroy_wa_ctx(engine);
	kfree(engine);
}

static void
logical_ring_default_vfuncs(struct intel_engine_cs *engine)
{
	/* Default vfuncs which can be overridden by each engine. */

	engine->destroy = execlists_destroy;
	engine->resume = execlists_resume;

	engine->reset.prepare = execlists_reset_prepare;
	engine->reset.reset = execlists_reset;
	engine->reset.finish = execlists_reset_finish;

	engine->cops = &execlists_context_ops;
	engine->request_alloc = execlists_request_alloc;

	engine->emit_flush = gen8_emit_flush;
	engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
	engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb;

	engine->set_default_submission = intel_execlists_set_default_submission;

	if (INTEL_GEN(engine->i915) < 11) {
		engine->irq_enable = gen8_logical_ring_enable_irq;
		engine->irq_disable = gen8_logical_ring_disable_irq;
	} else {
		/*
		 * TODO: On Gen11 interrupt masks need to be clear
		 * to allow C6 entry. Keep interrupts enabled at all
		 * times and take the hit of generating extra interrupts
		 * until a more refined solution exists.
		 */
	}
	if (IS_GEN(engine->i915, 8))
		engine->emit_bb_start = gen8_emit_bb_start;
	else
		engine->emit_bb_start = gen9_emit_bb_start;
}

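/*
 * On gen8-10 each engine's interrupt bits live at a fixed shift within the
 * shared GT interrupt registers; gen11+ uses a different interrupt layout,
 * so the shift is left at 0 there.
 */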
static inline void
logical_ring_default_irqs(struct intel_engine_cs *engine)
{
	unsigned int shift = 0;

	if (INTEL_GEN(engine->i915) < 11) {
		const u8 irq_shifts[] = {
			[RCS0]  = GEN8_RCS_IRQ_SHIFT,
			[BCS0]  = GEN8_BCS_IRQ_SHIFT,
			[VCS0]  = GEN8_VCS0_IRQ_SHIFT,
			[VCS1]  = GEN8_VCS1_IRQ_SHIFT,
			[VECS0] = GEN8_VECS_IRQ_SHIFT,
		};

		shift = irq_shifts[engine->id];
	}

	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
	engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
}

int intel_execlists_submission_setup(struct intel_engine_cs *engine)
{
	/* Intentionally left blank. */
	engine->buffer = NULL;

	tasklet_init(&engine->execlists.tasklet,
		     execlists_submission_tasklet, (unsigned long)engine);

	logical_ring_default_vfuncs(engine);
	logical_ring_default_irqs(engine);

	if (engine->class == RENDER_CLASS) {
		engine->init_context = gen8_init_rcs_context;
		engine->emit_flush = gen8_emit_flush_render;
		engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
	}

	return 0;
}

int intel_execlists_submission_init(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct intel_engine_execlists * const execlists = &engine->execlists;
	u32 base = engine->mmio_base;
	int ret;

	ret = intel_engine_init_common(engine);
	if (ret)
		return ret;

	intel_engine_init_workarounds(engine);
	intel_engine_init_whitelist(engine);

	if (intel_init_workaround_bb(engine))
		/*
		 * We continue even if we fail to initialize WA batch
		 * because we only expect rare glitches but nothing
		 * critical to prevent us from using the GPU.
		 */
		DRM_ERROR("WA batch buffer initialization failed\n");

	if (HAS_LOGICAL_RING_ELSQ(i915)) {
		execlists->submit_reg = i915->uncore.regs +
			i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base));
		execlists->ctrl_reg = i915->uncore.regs +
			i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base));
	} else {
		execlists->submit_reg = i915->uncore.regs +
			i915_mmio_reg_offset(RING_ELSP(base));
	}

	execlists->preempt_complete_status = ~0u;
	if (engine->preempt_context)
		execlists->preempt_complete_status =
			upper_32_bits(engine->preempt_context->lrc_desc);

	execlists->csb_status =
		&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];

	execlists->csb_write =
		&engine->status_page.addr[intel_hws_csb_write_index(i915)];

	if (INTEL_GEN(engine->i915) < 11)
		execlists->csb_size = GEN8_CSB_ENTRIES;
	else
		execlists->csb_size = GEN11_CSB_ENTRIES;

	reset_csb_pointers(execlists);

	return 0;
}

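/*
 * Per-gen default value written into the CTX_RCS_INDIRECT_CTX_OFFSET slot of
 * the context image (shifted into place by execlists_init_reg_state()).
 */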
static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
{
	u32 indirect_ctx_offset;

	switch (INTEL_GEN(engine->i915)) {
	default:
		MISSING_CASE(INTEL_GEN(engine->i915));
		/* fall through */
	case 11:
		indirect_ctx_offset =
			GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
		break;
	case 10:
		indirect_ctx_offset =
			GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
		break;
	case 9:
		indirect_ctx_offset =
			GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
		break;
	case 8:
		indirect_ctx_offset =
			GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
		break;
	}

	return indirect_ctx_offset;
}

static void execlists_init_reg_state(u32 *regs,
				     struct intel_context *ce,
				     struct intel_engine_cs *engine,
				     struct intel_ring *ring)
{
	struct i915_hw_ppgtt *ppgtt = ce->gem_context->ppgtt;
	bool rcs = engine->class == RENDER_CLASS;
	u32 base = engine->mmio_base;

	/* A context is actually a big batch buffer with several
	 * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
	 * values we are setting here are only for the first context restore:
	 * on a subsequent save, the GPU will recreate this batchbuffer with new
	 * values (including all the missing MI_LOAD_REGISTER_IMM commands that
	 * we are not initializing here).
	 */
	regs[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(rcs ? 14 : 11) |
				 MI_LRI_FORCE_POSTED;

	CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(base),
		_MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) |
		_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH));
	if (INTEL_GEN(engine->i915) < 11) {
		regs[CTX_CONTEXT_CONTROL + 1] |=
			_MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
					    CTX_CTRL_RS_CTX_ENABLE);
	}
	CTX_REG(regs, CTX_RING_HEAD, RING_HEAD(base), 0);
	CTX_REG(regs, CTX_RING_TAIL, RING_TAIL(base), 0);
	CTX_REG(regs, CTX_RING_BUFFER_START, RING_START(base), 0);
	CTX_REG(regs, CTX_RING_BUFFER_CONTROL, RING_CTL(base),
		RING_CTL_SIZE(ring->size) | RING_VALID);
	CTX_REG(regs, CTX_BB_HEAD_U, RING_BBADDR_UDW(base), 0);
	CTX_REG(regs, CTX_BB_HEAD_L, RING_BBADDR(base), 0);
	CTX_REG(regs, CTX_BB_STATE, RING_BBSTATE(base), RING_BB_PPGTT);
	CTX_REG(regs, CTX_SECOND_BB_HEAD_U, RING_SBBADDR_UDW(base), 0);
	CTX_REG(regs, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(base), 0);
	CTX_REG(regs, CTX_SECOND_BB_STATE, RING_SBBSTATE(base), 0);
	if (rcs) {
		struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;

		CTX_REG(regs, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(base), 0);
		CTX_REG(regs, CTX_RCS_INDIRECT_CTX_OFFSET,
			RING_INDIRECT_CTX_OFFSET(base), 0);
		if (wa_ctx->indirect_ctx.size) {
			u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);

			regs[CTX_RCS_INDIRECT_CTX + 1] =
				(ggtt_offset + wa_ctx->indirect_ctx.offset) |
				(wa_ctx->indirect_ctx.size / CACHELINE_BYTES);

			regs[CTX_RCS_INDIRECT_CTX_OFFSET + 1] =
				intel_lr_indirect_ctx_offset(engine) << 6;
		}

		CTX_REG(regs, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(base), 0);
		if (wa_ctx->per_ctx.size) {
			u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);

			regs[CTX_BB_PER_CTX_PTR + 1] =
				(ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
		}
	}

	regs[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;

	CTX_REG(regs, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(base), 0);
	/* PDP values will be assigned later if needed */
	CTX_REG(regs, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(base, 3), 0);
	CTX_REG(regs, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(base, 3), 0);
	CTX_REG(regs, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(base, 2), 0);
	CTX_REG(regs, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(base, 2), 0);
	CTX_REG(regs, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(base, 1), 0);
	CTX_REG(regs, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(base, 1), 0);
	CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(base, 0), 0);
	CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(base, 0), 0);

	if (i915_vm_is_4lvl(&ppgtt->vm)) {
		/* 64b PPGTT (48bit canonical)
		 * PDP0_DESCRIPTOR contains the base address to PML4 and
		 * other PDP Descriptors are ignored.
		 */
		ASSIGN_CTX_PML4(ppgtt, regs);
	} else {
		ASSIGN_CTX_PDP(ppgtt, regs, 3);
		ASSIGN_CTX_PDP(ppgtt, regs, 2);
		ASSIGN_CTX_PDP(ppgtt, regs, 1);
		ASSIGN_CTX_PDP(ppgtt, regs, 0);
	}

	if (rcs) {
		regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
		CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 0);

		i915_oa_init_reg_state(engine, ce, regs);
	}

	regs[CTX_END] = MI_BATCH_BUFFER_END;
	if (INTEL_GEN(engine->i915) >= 10)
		regs[CTX_END] |= BIT(0);
}

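/*
 * Fill a freshly allocated context object: copy in the engine's default
 * ("golden") context image if we have one (skipping the GuC header pages),
 * then write the register state page for this context and ring.
 */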
static int
populate_lr_context(struct intel_context *ce,
		    struct drm_i915_gem_object *ctx_obj,
		    struct intel_engine_cs *engine,
		    struct intel_ring *ring)
{
	void *vaddr;
	u32 *regs;
	int ret;

	vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
		return ret;
	}

	if (engine->default_state) {
		/*
		 * We only want to copy over the template context state;
		 * skipping over the headers reserved for GuC communication,
		 * leaving those as zero.
		 */
		const unsigned long start = LRC_HEADER_PAGES * PAGE_SIZE;
		void *defaults;

		defaults = i915_gem_object_pin_map(engine->default_state,
						   I915_MAP_WB);
		if (IS_ERR(defaults)) {
			ret = PTR_ERR(defaults);
			goto err_unpin_ctx;
		}

		memcpy(vaddr + start, defaults + start, engine->context_size);
		i915_gem_object_unpin_map(engine->default_state);
	}

	/* The second page of the context object contains some fields which must
	 * be set up prior to the first execution. */
	regs = vaddr + LRC_STATE_PN * PAGE_SIZE;
	execlists_init_reg_state(regs, ce, engine, ring);
	if (!engine->default_state)
		regs[CTX_CONTEXT_CONTROL + 1] |=
			_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
	if (ce->gem_context == engine->i915->preempt_context &&
	    INTEL_GEN(engine->i915) < 11)
		regs[CTX_CONTEXT_CONTROL + 1] |=
			_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
					   CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT);

	ret = 0;
err_unpin_ctx:
	__i915_gem_object_flush_map(ctx_obj,
				    LRC_HEADER_PAGES * PAGE_SIZE,
				    engine->context_size);
	i915_gem_object_unpin_map(ctx_obj);
	return ret;
}

static struct i915_timeline *get_timeline(struct i915_gem_context *ctx)
{
	if (ctx->timeline)
		return i915_timeline_get(ctx->timeline);
	else
		return i915_timeline_create(ctx->i915, NULL);
}

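/*
 * Deferred allocation of the context backing store: the context image, its
 * timeline and its ring are created on first use and attached to the
 * intel_context.
 */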
static int execlists_context_deferred_alloc(struct intel_context *ce,
					    struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *ctx_obj;
	struct i915_vma *vma;
	u32 context_size;
	struct intel_ring *ring;
	struct i915_timeline *timeline;
	int ret;

	if (ce->state)
		return 0;

	context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);

	/*
	 * Before the actual start of the context image, we insert a few pages
	 * for our own use and for sharing with the GuC.
	 */
	context_size += LRC_HEADER_PAGES * PAGE_SIZE;

	ctx_obj = i915_gem_object_create(engine->i915, context_size);
	if (IS_ERR(ctx_obj))
		return PTR_ERR(ctx_obj);

	vma = i915_vma_instance(ctx_obj, &engine->i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto error_deref_obj;
	}

	timeline = get_timeline(ce->gem_context);
	if (IS_ERR(timeline)) {
		ret = PTR_ERR(timeline);
		goto error_deref_obj;
	}

	ring = intel_engine_create_ring(engine,
					timeline,
					ce->gem_context->ring_size);
	i915_timeline_put(timeline);
	if (IS_ERR(ring)) {
		ret = PTR_ERR(ring);
		goto error_deref_obj;
	}

	ret = populate_lr_context(ce, ctx_obj, engine, ring);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
		goto error_ring_free;
	}

	ce->ring = ring;
	ce->state = vma;

	return 0;

error_ring_free:
	intel_ring_put(ring);
error_deref_obj:
	i915_gem_object_put(ctx_obj);
	return ret;
}

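/*
 * Debug dump of the engine's execlists state: print the currently executing
 * requests followed by the queued (not yet submitted) requests through the
 * caller-supplied @show_request, limiting each list to @max entries.
 */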
void intel_execlists_show_requests(struct intel_engine_cs *engine,
				   struct drm_printer *m,
				   void (*show_request)(struct drm_printer *m,
							struct i915_request *rq,
							const char *prefix),
				   unsigned int max)
{
	const struct intel_engine_execlists *execlists = &engine->execlists;
	struct i915_request *rq, *last;
	unsigned long flags;
	unsigned int count;
	struct rb_node *rb;

	spin_lock_irqsave(&engine->timeline.lock, flags);

	last = NULL;
	count = 0;
	list_for_each_entry(rq, &engine->timeline.requests, link) {
		if (count++ < max - 1)
			show_request(m, rq, "\t\tE ");
		else
			last = rq;
	}
	if (last) {
		if (count > max) {
			drm_printf(m,
				   "\t\t...skipping %d executing requests...\n",
				   count - max);
		}
		show_request(m, last, "\t\tE ");
	}

	last = NULL;
	count = 0;
	if (execlists->queue_priority_hint != INT_MIN)
		drm_printf(m, "\t\tQueue priority hint: %d\n",
			   execlists->queue_priority_hint);
	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
		struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
		int i;

		priolist_for_each_request(rq, p, i) {
			if (count++ < max - 1)
				show_request(m, rq, "\t\tQ ");
			else
				last = rq;
		}
	}
	if (last) {
		if (count > max) {
			drm_printf(m,
				   "\t\t...skipping %d queued requests...\n",
				   count - max);
		}
		show_request(m, last, "\t\tQ ");
	}

	spin_unlock_irqrestore(&engine->timeline.lock, flags);
}

void intel_lr_context_reset(struct intel_engine_cs *engine,
			    struct intel_context *ce,
			    u32 head,
			    bool scrub)
{
	/*
	 * We want a simple context + ring to execute the breadcrumb update.
	 * We cannot rely on the context being intact across the GPU hang,
	 * so clear it and rebuild just what we need for the breadcrumb.
	 * All pending requests for this context will be zapped, and any
	 * future request will be after userspace has had the opportunity
	 * to recreate its own state.
	 */
	if (scrub) {
		u32 *regs = ce->lrc_reg_state;

		if (engine->pinned_default_state) {
			memcpy(regs, /* skip restoring the vanilla PPHWSP */
			       engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
			       engine->context_size - PAGE_SIZE);
		}
		execlists_init_reg_state(regs, ce, engine, ce->ring);
	}

	/* Rerun the request; its payload has been neutered (if guilty). */
	ce->ring->head = head;
	intel_ring_update_space(ce->ring);

	__execlists_update_reg_state(ce, engine);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_lrc.c"
#endif