/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Michel Thierry <michel.thierry@intel.com>
 *    Thomas Daniel <thomas.daniel@intel.com>
 *    Oscar Mateo <oscar.mateo@intel.com>
 *
 */

/**
 * DOC: Logical Rings, Logical Ring Contexts and Execlists
 *
 * Motivation:
 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
 * These expanded contexts enable a number of new abilities, especially
 * "Execlists" (also implemented in this file).
 *
 * One of the main differences from the legacy HW contexts is that logical
 * ring contexts incorporate many more things into the context's state, like
 * PDPs or ringbuffer control registers:
 *
 * The reason why PDPs are included in the context is straightforward: as
 * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
 * contained there means you don't need to do a ppgtt->switch_mm yourself,
 * instead, the GPU will do it for you on the context switch.
 *
 * But, what about the ringbuffer control registers (head, tail, etc.)?
 * Shouldn't we just need a set of those per engine command streamer? This is
 * where the name "Logical Rings" starts to make sense: by virtualizing the
 * rings, the engine cs shifts to a new "ring buffer" with every context
 * switch. When you want to submit a workload to the GPU you: A) choose your
 * context, B) find its appropriate virtualized ring, C) write commands to it
 * and then, finally, D) tell the GPU to switch to that context.
 *
 * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
 * to a context is via a context execution list, ergo "Execlists".
 *
 * LRC implementation:
 * Regarding the creation of contexts, we have:
 *
 * - One global default context.
 * - One local default context for each opened fd.
 * - One local extra context for each context create ioctl call.
 *
 * Now that ringbuffers belong per-context (and not per-engine, like before)
 * and that contexts are uniquely tied to a given engine (and not reusable,
 * like before) we need:
 *
 * - One ringbuffer per-engine inside each context.
 * - One backing object per-engine inside each context.
 *
 * The global default context starts its life with these new objects fully
 * allocated and populated. The local default context for each opened fd is
 * more complex, because we don't know at creation time which engine is going
 * to use them. To handle this, we have implemented a deferred creation of LR
 * contexts:
 *
 * The local context starts its life as a hollow or blank holder, that only
 * gets populated for a given engine once we receive an execbuffer. If later
 * on we receive another execbuffer ioctl for the same context but a different
 * engine, we allocate/populate a new ringbuffer and context backing object and
 * so on.
 *
 * Finally, regarding local contexts created using the ioctl call: as they are
 * only allowed with the render ring, we can allocate & populate them right
 * away (no need to defer anything, at least for now).
 *
 * Execlists implementation:
 * Execlists are the new method by which, on gen8+ hardware, workloads are
 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
 * This method works as follows:
 *
 * When a request is committed, its commands (the BB start and any leading or
 * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
 * for the appropriate context. The tail pointer in the hardware context is not
 * updated at this time, but instead, kept by the driver in the ringbuffer
 * structure. A structure representing this request is added to a request queue
 * for the appropriate engine: this structure contains a copy of the context's
 * tail after the request was written to the ring buffer and a pointer to the
 * context itself.
 *
 * If the engine's request queue was empty before the request was added, the
 * queue is processed immediately. Otherwise the queue will be processed during
 * a context switch interrupt. In any case, elements on the queue will get sent
 * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
 * globally unique 20-bit submission ID.
 *
 * When execution of a request completes, the GPU updates the context status
 * buffer with a context complete event and generates a context switch interrupt.
 * During the interrupt handling, the driver examines the events in the buffer:
 * for each context complete event, if the announced ID matches that on the head
 * of the request queue, then that request is retired and removed from the queue.
 *
 * After processing, if any requests were retired and the queue is not empty
 * then a new execution list can be submitted. The two requests at the front of
 * the queue are next to be submitted but since a context may not occur twice in
 * an execution list, if subsequent requests have the same ID as the first then
 * the two requests must be combined. This is done simply by discarding requests
 * at the head of the queue until either only one request is left (in which case
 * we use a NULL second context) or the first two requests have unique IDs.
 *
 * By always executing the first two requests in the queue the driver ensures
 * that the GPU is kept as busy as possible. In the case where a single context
 * completes but a second context is still executing, the request for this second
 * context will be at the head of the queue when we remove the first one. This
 * request will then be resubmitted along with a new request for a different context,
 * which will cause the hardware to continue executing the second request and queue
 * the new request (the GPU detects the condition of a context getting preempted
 * with the same context and optimizes the context switch flow by not doing
 * preemption, but just sampling the new tail pointer).
 *
 */
#include <linux/interrupt.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_mocs.h"

#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)

#define RING_EXECLIST_QFULL		(1 << 0x2)
#define RING_EXECLIST1_VALID		(1 << 0x3)
#define RING_EXECLIST0_VALID		(1 << 0x4)
#define RING_EXECLIST_ACTIVE_STATUS	(3 << 0xE)
#define RING_EXECLIST1_ACTIVE		(1 << 0x11)
#define RING_EXECLIST0_ACTIVE		(1 << 0x12)

#define GEN8_CTX_STATUS_IDLE_ACTIVE	(1 << 0)
#define GEN8_CTX_STATUS_PREEMPTED	(1 << 1)
#define GEN8_CTX_STATUS_ELEMENT_SWITCH	(1 << 2)
#define GEN8_CTX_STATUS_ACTIVE_IDLE	(1 << 3)
#define GEN8_CTX_STATUS_COMPLETE	(1 << 4)
#define GEN8_CTX_STATUS_LITE_RESTORE	(1 << 15)

#define CTX_LRI_HEADER_0		0x01
#define CTX_CONTEXT_CONTROL		0x02
#define CTX_RING_HEAD			0x04
#define CTX_RING_TAIL			0x06
#define CTX_RING_BUFFER_START		0x08
#define CTX_RING_BUFFER_CONTROL		0x0a
#define CTX_BB_HEAD_U			0x0c
#define CTX_BB_HEAD_L			0x0e
#define CTX_BB_STATE			0x10
#define CTX_SECOND_BB_HEAD_U		0x12
#define CTX_SECOND_BB_HEAD_L		0x14
#define CTX_SECOND_BB_STATE		0x16
#define CTX_BB_PER_CTX_PTR		0x18
#define CTX_RCS_INDIRECT_CTX		0x1a
#define CTX_RCS_INDIRECT_CTX_OFFSET	0x1c
#define CTX_LRI_HEADER_1		0x21
#define CTX_CTX_TIMESTAMP		0x22
#define CTX_PDP3_UDW			0x24
#define CTX_PDP3_LDW			0x26
#define CTX_PDP2_UDW			0x28
#define CTX_PDP2_LDW			0x2a
#define CTX_PDP1_UDW			0x2c
#define CTX_PDP1_LDW			0x2e
#define CTX_PDP0_UDW			0x30
#define CTX_PDP0_LDW			0x32
#define CTX_LRI_HEADER_2		0x41
#define CTX_R_PWR_CLK_STATE		0x42
#define CTX_GPGPU_CSR_BASE_ADDRESS	0x44

#define GEN8_CTX_VALID (1<<0)
#define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
#define GEN8_CTX_FORCE_RESTORE (1<<2)
#define GEN8_CTX_L3LLC_COHERENT (1<<5)
#define GEN8_CTX_PRIVILEGE (1<<8)

#define ASSIGN_CTX_REG(reg_state, pos, reg, val) do { \
	(reg_state)[(pos)+0] = i915_mmio_reg_offset(reg); \
	(reg_state)[(pos)+1] = (val); \
} while (0)

#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do {		\
	const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n));	\
	reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
	reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
} while (0)

#define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
	reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
	reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
} while (0)

enum {
	FAULT_AND_HANG = 0,
	FAULT_AND_HALT, /* Debug only */
	FAULT_AND_STREAM,
	FAULT_AND_CONTINUE /* Unsupported */
};
#define GEN8_CTX_ID_SHIFT 32
#define GEN8_CTX_ID_WIDTH 21
#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT	0x17
#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT	0x26

/* Typical size of the average request (2 pipecontrols and a MI_BB) */
#define EXECLISTS_REQUEST_SIZE 64 /* bytes */

static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
					    struct intel_engine_cs *engine);
static int intel_lr_context_pin(struct i915_gem_context *ctx,
				struct intel_engine_cs *engine);

/**
 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
 * @dev_priv: i915 device private
 * @enable_execlists: value of i915.enable_execlists module parameter.
 *
 * Only certain platforms support Execlists (the prerequisites being
 * support for Logical Ring Contexts and Aliasing PPGTT or better).
 *
 * Return: 1 if Execlists is supported and has to be enabled.
 */
int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists)
{
	/* On platforms where execlists are available, vGPU only supports
	 * execlist mode, not ring buffer mode.
	 */
	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv))
		return 1;

	if (INTEL_GEN(dev_priv) >= 9)
		return 1;

	if (enable_execlists == 0)
		return 0;

	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
	    USES_PPGTT(dev_priv) &&
	    i915.use_mmio_flip >= 0)
		return 1;

	return 0;
}

static void
logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv))
		engine->idle_lite_restore_wa = ~0;

	engine->disable_lite_restore_wa = (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
					IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
					(engine->id == VCS || engine->id == VCS2);

	engine->ctx_desc_template = GEN8_CTX_VALID;
	if (IS_GEN8(dev_priv))
		engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
	engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;

	/* TODO: WaDisableLiteRestore when we start using semaphore
	 * signalling between Command Streamers */
	/* ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; */

	/* WaEnableForceRestoreInCtxtDescForVCS:skl */
	/* WaEnableForceRestoreInCtxtDescForVCS:bxt */
	if (engine->disable_lite_restore_wa)
		engine->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
}

/**
 * intel_lr_context_descriptor_update() - calculate & cache the descriptor
 *					   for a pinned context
 * @ctx: Context to work on
 * @engine: Engine the descriptor will be used with
 *
 * The context descriptor encodes various attributes of a context,
 * including its GTT address and some flags. Because it's fairly
 * expensive to calculate, we'll just do it once and cache the result,
 * which remains valid until the context is unpinned.
 *
 * This is what a descriptor looks like, from LSB to MSB::
 *
 *      bits  0-11:    flags, GEN8_CTX_* (cached in ctx_desc_template)
 *      bits 12-31:    LRCA, GTT address of (the HWSP of) this context
 *      bits 32-52:    ctx ID, a globally unique tag
 *      bits 53-54:    mbz, reserved for use by hardware
 *      bits 55-63:    group ID, currently unused and set to 0
 */
static void
intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
				   struct intel_engine_cs *engine)
{
	struct intel_context *ce = &ctx->engine[engine->id];
	u64 desc;

	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));

	desc = ctx->desc_template;				/* bits  3-4  */
	desc |= engine->ctx_desc_template;			/* bits  0-11 */
	desc |= ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
								/* bits 12-31 */
	desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT;		/* bits 32-52 */

	ce->lrc_desc = desc;
}

uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
				     struct intel_engine_cs *engine)
{
	return ctx->engine[engine->id].lrc_desc;
}
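
/*
 * Illustrative sketch only, not used by the driver: how a context
 * descriptor could be packed by hand from the layout documented above.
 * The helper name and its parameters are hypothetical; the real packing
 * (including the PPHWSP offset added to the LRCA) is done in
 * intel_lr_context_descriptor_update().
 */
static inline u64 example_pack_lrc_descriptor(u32 flags, u64 lrca, u32 ctx_id)
{
	u64 desc;

	desc  = flags & GENMASK_ULL(11, 0);		/* bits  0-11: GEN8_CTX_* flags */
	desc |= lrca & GENMASK_ULL(31, 12);		/* bits 12-31: page-aligned LRCA */
	desc |= (u64)ctx_id << GEN8_CTX_ID_SHIFT;	/* bits 32-52: unique ctx ID */

	return desc;
}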

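/*
 * Write a pair of context descriptors to the ExecLists Submit Port.
 * Both elements must be written, upper dword before lower dword, with
 * element 1 written first; the write of element 0's lower dword is what
 * triggers the actual submission.
 */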
static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
				 struct drm_i915_gem_request *rq1)
{

	struct intel_engine_cs *engine = rq0->engine;
	struct drm_i915_private *dev_priv = rq0->i915;
	uint64_t desc[2];

	if (rq1) {
		desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->engine);
		rq1->elsp_submitted++;
	} else {
		desc[1] = 0;
	}

	desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->engine);
	rq0->elsp_submitted++;

	/* You must always write both descriptors in the order below. */
	I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[1]));
	I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[1]));

	I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[0]));
	/* The context is automatically loaded after the following */
	I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[0]));

	/* ELSP is a wo register, use another nearby reg for posting */
	POSTING_READ_FW(RING_EXECLIST_STATUS_LO(engine));
}

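/*
 * Refresh the four PDP slots of a 32b PPGTT in the saved context image so
 * that the hardware picks up the current page-directory addresses on the
 * next context restore.
 */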
static void
execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
{
	ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
	ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
	ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
	ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
}

static void execlists_update_context(struct drm_i915_gem_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
	uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;

	reg_state[CTX_RING_TAIL+1] = intel_ring_offset(rq->ring, rq->tail);

	/* True 32b PPGTT with dynamic page allocation: update PDP
	 * registers and point the unallocated PDPs to scratch page.
	 * PML4 is allocated during ppgtt init, so this is not needed
	 * in 48-bit mode.
	 */
	if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
		execlists_update_context_pdps(ppgtt, reg_state);
}

static void execlists_elsp_submit_contexts(struct drm_i915_gem_request *rq0,
					   struct drm_i915_gem_request *rq1)
{
	struct drm_i915_private *dev_priv = rq0->i915;
	unsigned int fw_domains = rq0->engine->fw_domains;

	execlists_update_context(rq0);

	if (rq1)
		execlists_update_context(rq1);

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);

	execlists_elsp_write(rq0, rq1);

	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
	spin_unlock_irq(&dev_priv->uncore.lock);
}

static inline void execlists_context_status_change(
		struct drm_i915_gem_request *rq,
		unsigned long status)
{
	/*
	 * Only used when GVT-g is enabled now. When GVT-g is disabled,
	 * The compiler should eliminate this function as dead-code.
	 */
	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
		return;

	atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
}

static void execlists_unqueue(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
	struct drm_i915_gem_request *cursor, *tmp;

	assert_spin_locked(&engine->execlist_lock);

	/*
	 * If irqs are not active generate a warning as batches that finish
	 * without the irqs may get lost and a GPU Hang may occur.
	 */
	WARN_ON(!intel_irqs_enabled(engine->i915));

	/* Try to read in pairs */
	list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
				 execlist_link) {
		if (!req0) {
			req0 = cursor;
		} else if (req0->ctx == cursor->ctx) {
			/* Same ctx: ignore first request, as second request
			 * will update tail past first request's workload */
			cursor->elsp_submitted = req0->elsp_submitted;
			list_del(&req0->execlist_link);
			i915_gem_request_put(req0);
			req0 = cursor;
		} else {
			if (IS_ENABLED(CONFIG_DRM_I915_GVT)) {
				/*
				 * req0 (after merged) ctx requires single
				 * submission, stop picking
				 */
				if (req0->ctx->execlists_force_single_submission)
					break;
				/*
				 * req0 ctx doesn't require single submission,
				 * but next req ctx requires, stop picking
				 */
				if (cursor->ctx->execlists_force_single_submission)
					break;
			}
			req1 = cursor;
			WARN_ON(req1->elsp_submitted);
			break;
		}
	}

	if (unlikely(!req0))
		return;

	execlists_context_status_change(req0, INTEL_CONTEXT_SCHEDULE_IN);

	if (req1)
		execlists_context_status_change(req1,
						INTEL_CONTEXT_SCHEDULE_IN);

	if (req0->elsp_submitted & engine->idle_lite_restore_wa) {
		/*
		 * WaIdleLiteRestore: make sure we never cause a lite restore
		 * with HEAD==TAIL.
		 *
		 * Apply the wa NOOPS to prevent ring:HEAD == req:TAIL as we
		 * resubmit the request. See gen8_emit_request() for where we
		 * prepare the padding after the end of the request.
		 */
		req0->tail += 8;
		req0->tail &= req0->ring->size - 1;
	}

	execlists_elsp_submit_contexts(req0, req1);
}

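/*
 * Retire the request at the head of the execlist queue if it matches the
 * context ID reported by a completion event. Returns 1 when the head
 * request was removed (its last ELSP submission completed), 0 otherwise.
 */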
static unsigned int
execlists_check_remove_request(struct intel_engine_cs *engine, u32 ctx_id)
{
	struct drm_i915_gem_request *head_req;

	assert_spin_locked(&engine->execlist_lock);

	head_req = list_first_entry_or_null(&engine->execlist_queue,
					    struct drm_i915_gem_request,
					    execlist_link);

	if (WARN_ON(!head_req || (head_req->ctx_hw_id != ctx_id)))
		return 0;

	WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");

	if (--head_req->elsp_submitted > 0)
		return 0;

	execlists_context_status_change(head_req, INTEL_CONTEXT_SCHEDULE_OUT);

	list_del(&head_req->execlist_link);
	i915_gem_request_put(head_req);

	return 1;
}

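/*
 * Read one Context Status Buffer entry: returns the status dword (or 0 for
 * an idle-to-active event, which needs no further processing) and fills in
 * the context ID the event refers to.
 */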
static u32
get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
		   u32 *context_id)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 status;

	read_pointer %= GEN8_CSB_ENTRIES;

	status = I915_READ_FW(RING_CONTEXT_STATUS_BUF_LO(engine, read_pointer));

	if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
		return 0;

	*context_id = I915_READ_FW(RING_CONTEXT_STATUS_BUF_HI(engine,
							      read_pointer));

	return status;
}

/*
 * Check the unread Context Status Buffers and manage the submission of new
 * contexts to the ELSP accordingly.
 */
static void intel_lrc_irq_handler(unsigned long data)
{
	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
	struct drm_i915_private *dev_priv = engine->i915;
	u32 status_pointer;
	unsigned int read_pointer, write_pointer;
	u32 csb[GEN8_CSB_ENTRIES][2];
	unsigned int csb_read = 0, i;
	unsigned int submit_contexts = 0;

	intel_uncore_forcewake_get(dev_priv, engine->fw_domains);

	status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine));

	read_pointer = engine->next_context_status_buffer;
	write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
	if (read_pointer > write_pointer)
		write_pointer += GEN8_CSB_ENTRIES;

	while (read_pointer < write_pointer) {
		if (WARN_ON_ONCE(csb_read == GEN8_CSB_ENTRIES))
			break;
		csb[csb_read][0] = get_context_status(engine, ++read_pointer,
						      &csb[csb_read][1]);
		csb_read++;
	}

	engine->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;

	/* Update the read pointer to the old write pointer. Manual ringbuffer
	 * management ftw </sarcasm> */
	I915_WRITE_FW(RING_CONTEXT_STATUS_PTR(engine),
		      _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
				    engine->next_context_status_buffer << 8));

	intel_uncore_forcewake_put(dev_priv, engine->fw_domains);

	spin_lock(&engine->execlist_lock);

	for (i = 0; i < csb_read; i++) {
		if (unlikely(csb[i][0] & GEN8_CTX_STATUS_PREEMPTED)) {
			if (csb[i][0] & GEN8_CTX_STATUS_LITE_RESTORE) {
				if (execlists_check_remove_request(engine, csb[i][1]))
					WARN(1, "Lite Restored request removed from queue\n");
			} else
				WARN(1, "Preemption without Lite Restore\n");
		}

		if (csb[i][0] & (GEN8_CTX_STATUS_ACTIVE_IDLE |
		    GEN8_CTX_STATUS_ELEMENT_SWITCH))
			submit_contexts +=
				execlists_check_remove_request(engine, csb[i][1]);
	}

	if (submit_contexts) {
		if (!engine->disable_lite_restore_wa ||
		    (csb[i][0] & GEN8_CTX_STATUS_ACTIVE_IDLE))
			execlists_unqueue(engine);
	}

	spin_unlock(&engine->execlist_lock);

	if (unlikely(submit_contexts > 2))
		DRM_ERROR("More than two context complete events?\n");
}

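/*
 * Add a request to the engine's execlist queue. If more than two requests
 * are already queued and the last one belongs to the same context (and has
 * not yet been submitted to the ELSP), that older request is dropped, since
 * the new request's tail supersedes it. An empty queue is kicked right away.
 */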
static void execlists_submit_request(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct drm_i915_gem_request *cursor;
	int num_elements = 0;

	spin_lock_bh(&engine->execlist_lock);

	list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
		if (++num_elements > 2)
			break;

	if (num_elements > 2) {
		struct drm_i915_gem_request *tail_req;

		tail_req = list_last_entry(&engine->execlist_queue,
					   struct drm_i915_gem_request,
					   execlist_link);

		if (request->ctx == tail_req->ctx) {
			WARN(tail_req->elsp_submitted != 0,
				"More than 2 already-submitted reqs queued\n");
			list_del(&tail_req->execlist_link);
			i915_gem_request_put(tail_req);
		}
	}

	i915_gem_request_get(request);
	list_add_tail(&request->execlist_link, &engine->execlist_queue);
	request->ctx_hw_id = request->ctx->hw_id;
	if (num_elements == 0)
		execlists_unqueue(engine);

	spin_unlock_bh(&engine->execlist_lock);
}

int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_context *ce = &request->ctx->engine[engine->id];
	int ret;

	/* Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += EXECLISTS_REQUEST_SIZE;

	if (!ce->state) {
		ret = execlists_context_deferred_alloc(request->ctx, engine);
		if (ret)
			return ret;
	}

	request->ring = ce->ring;

	if (i915.enable_guc_submission) {
		/*
		 * Check that the GuC has space for the request before
		 * going any further, as the i915_add_request() call
		 * later on mustn't fail ...
		 */
		ret = i915_guc_wq_check_space(request);
		if (ret)
			return ret;
	}

	ret = intel_lr_context_pin(request->ctx, engine);
	if (ret)
		return ret;

	ret = intel_ring_begin(request, 0);
	if (ret)
		goto err_unpin;

	if (!ce->initialised) {
		ret = engine->init_context(request);
		if (ret)
			goto err_unpin;

		ce->initialised = true;
	}

	/* Note that after this point, we have committed to using
	 * this request as it is being used to both track the
	 * state of engine initialisation and liveness of the
	 * golden renderstate above. Think twice before you try
	 * to cancel/unwind this request now.
	 */

	request->reserved_space -= EXECLISTS_REQUEST_SIZE;
	return 0;

err_unpin:
	intel_lr_context_unpin(request->ctx, engine);
	return ret;
}

/*
 * intel_logical_ring_advance() - advance the tail and prepare for submission
 * @request: Request to advance the logical ringbuffer of.
 *
 * The tail is updated in our logical ringbuffer struct, not in the actual context. What
 * really happens during submission is that the context and current tail will be placed
 * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
 * point, the tail *inside* the context is updated and the ELSP written to.
 */
static int
intel_logical_ring_advance(struct drm_i915_gem_request *request)
{
	struct intel_ring *ring = request->ring;
	struct intel_engine_cs *engine = request->engine;

	intel_ring_advance(ring);
	request->tail = ring->tail;

	/*
	 * Here we add two extra NOOPs as padding to avoid
	 * lite restore of a context with HEAD==TAIL.
	 *
	 * Caller must reserve WA_TAIL_DWORDS for us!
	 */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	/* We keep the previous context alive until we retire the following
	 * request. This ensures that the context object is still pinned
	 * for any residual writes the HW makes into it on the context switch
	 * into the next object following the breadcrumb. Otherwise, we may
	 * retire the context too early.
	 */
	request->previous_context = engine->last_context;
	engine->last_context = request->ctx;
	return 0;
}

void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *req, *tmp;
	LIST_HEAD(cancel_list);

	WARN_ON(!mutex_is_locked(&engine->i915->drm.struct_mutex));

	spin_lock_bh(&engine->execlist_lock);
	list_replace_init(&engine->execlist_queue, &cancel_list);
	spin_unlock_bh(&engine->execlist_lock);

	list_for_each_entry_safe(req, tmp, &cancel_list, execlist_link) {
		list_del(&req->execlist_link);
		i915_gem_request_put(req);
	}
}

void intel_logical_ring_stop(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	if (!intel_engine_initialized(engine))
		return;

	ret = intel_engine_idle(engine);
	if (ret)
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  engine->name, ret);

	/* TODO: Is this correct with Execlists enabled? */
	I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
	if (intel_wait_for_register(dev_priv,
				    RING_MI_MODE(engine->mmio_base),
				    MODE_IDLE, MODE_IDLE,
				    1000)) {
		DRM_ERROR("%s :timed out trying to stop ring\n", engine->name);
		return;
	}
	I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
}

static int intel_lr_context_pin(struct i915_gem_context *ctx,
				struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = ctx->i915;
	struct intel_context *ce = &ctx->engine[engine->id];
	void *vaddr;
	u32 *lrc_reg_state;
	int ret;

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);

	if (ce->pin_count++)
		return 0;

	ret = i915_gem_obj_ggtt_pin(ce->state, GEN8_LR_CONTEXT_ALIGN,
				    PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
	if (ret)
		goto err;

	vaddr = i915_gem_object_pin_map(ce->state);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto unpin_ctx_obj;
	}

	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;

	ret = intel_ring_pin(ce->ring);
	if (ret)
		goto unpin_map;

	ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state);
	intel_lr_context_descriptor_update(ctx, engine);

	lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ring->vma->node.start;
	ce->lrc_reg_state = lrc_reg_state;
	ce->state->dirty = true;

	/* Invalidate GuC TLB. */
	if (i915.enable_guc_submission)
		I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);

	i915_gem_context_get(ctx);
	return 0;

unpin_map:
	i915_gem_object_unpin_map(ce->state);
unpin_ctx_obj:
	i915_gem_object_ggtt_unpin(ce->state);
err:
	ce->pin_count = 0;
	return ret;
}

void intel_lr_context_unpin(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine)
{
	struct intel_context *ce = &ctx->engine[engine->id];

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
	GEM_BUG_ON(ce->pin_count == 0);

	if (--ce->pin_count)
		return;

	intel_ring_unpin(ce->ring);

	i915_gem_object_unpin_map(ce->state);
	i915_gem_object_ggtt_unpin(ce->state);

	ce->lrc_vma = NULL;
	ce->lrc_desc = 0;
	ce->lrc_reg_state = NULL;

	i915_gem_context_put(ctx);
}

static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
	int ret, i;
	struct intel_ring *ring = req->ring;
	struct i915_workarounds *w = &req->i915->workarounds;

	if (w->count == 0)
		return 0;

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	ret = intel_ring_begin(req, w->count * 2 + 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
	for (i = 0; i < w->count; i++) {
		intel_ring_emit_reg(ring, w->reg[i].addr);
		intel_ring_emit(ring, w->reg[i].value);
	}
	intel_ring_emit(ring, MI_NOOP);

	intel_ring_advance(ring);

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}

#define wa_ctx_emit(batch, index, cmd)					\
	do {								\
		int __index = (index)++;				\
		if (WARN_ON(__index >= (PAGE_SIZE / sizeof(uint32_t)))) { \
			return -ENOSPC;					\
		}							\
		batch[__index] = (cmd);					\
	} while (0)

#define wa_ctx_emit_reg(batch, index, reg) \
	wa_ctx_emit((batch), (index), i915_mmio_reg_offset(reg))

/*
 * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after
 * PIPE_CONTROL instruction. This is required for the flush to happen correctly
 * but there is a slight complication as this is applied in WA batch where the
 * values are only initialized once so we cannot take register value at the
 * beginning and reuse it further; hence we save its value to memory, upload a
 * constant value with bit21 set and then we restore it back with the saved value.
 * To simplify the WA, a constant value is formed by using the default value
 * of this register. This shouldn't be a problem because we are only modifying
 * it for a short period and this batch is non-preemptible. We can of course
 * use additional instructions that read the actual value of the register
 * at that time and set our bit of interest but it makes the WA complicated.
 *
 * This WA is also required for Gen9 so extracting as a function avoids
 * code duplication.
 */
static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
						uint32_t *batch,
						uint32_t index)
{
	uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);

	/*
	 * WaDisableLSQCROPERFforOCL:skl,kbl
	 * This WA is implemented in skl_init_clock_gating() but since
	 * this batch updates GEN8_L3SQCREG4 with default value we need to
	 * set this bit here to retain the WA during flush.
	 */
	if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_E0) ||
	    IS_KBL_REVID(engine->i915, 0, KBL_REVID_E0))
		l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;

	wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
				   MI_SRM_LRM_GLOBAL_GTT));
	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
	wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
	wa_ctx_emit(batch, index, 0);

	wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
	wa_ctx_emit(batch, index, l3sqc4_flush);

	wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
	wa_ctx_emit(batch, index, (PIPE_CONTROL_CS_STALL |
				   PIPE_CONTROL_DC_FLUSH_ENABLE));
	wa_ctx_emit(batch, index, 0);
	wa_ctx_emit(batch, index, 0);
	wa_ctx_emit(batch, index, 0);
	wa_ctx_emit(batch, index, 0);

	wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
				   MI_SRM_LRM_GLOBAL_GTT));
	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
	wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
	wa_ctx_emit(batch, index, 0);

	return index;
}

static inline uint32_t wa_ctx_start(struct i915_wa_ctx_bb *wa_ctx,
				    uint32_t offset,
				    uint32_t start_alignment)
{
	return wa_ctx->offset = ALIGN(offset, start_alignment);
}

static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
			     uint32_t offset,
			     uint32_t size_alignment)
{
	wa_ctx->size = offset - wa_ctx->offset;

	WARN(wa_ctx->size % size_alignment,
	     "wa_ctx_bb failed sanity checks: size %d is not aligned to %d\n",
	     wa_ctx->size, size_alignment);
	return 0;
}

/*
 * Typically we only have one indirect_ctx and per_ctx batch buffer which are
 * initialized at the beginning and shared across all contexts but this field
 * helps us to have multiple batches at different offsets and select them based
 * on some criteria. At the moment this batch always starts at the beginning of the page
 * and at this point we don't have multiple wa_ctx batch buffers.
 *
 * The number of WAs applied is not known at the beginning; we use this field
 * to return the number of DWORDs written.
 *
 * It is to be noted that this batch does not contain MI_BATCH_BUFFER_END
 * so it adds NOOPs as padding to make it cacheline aligned.
 * MI_BATCH_BUFFER_END will be added to perctx batch and both of them together
 * make a complete batch buffer.
 */
static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
				    struct i915_wa_ctx_bb *wa_ctx,
				    uint32_t *batch,
				    uint32_t *offset)
{
	uint32_t scratch_addr;
	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);

	/* WaDisableCtxRestoreArbitration:bdw,chv */
	wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);

	/* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
	if (IS_BROADWELL(engine->i915)) {
		int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
		if (rc < 0)
			return rc;
		index = rc;
	}

	/* WaClearSlmSpaceAtContextSwitch:bdw,chv */
	/* Actual scratch location is at 128 bytes offset */
	scratch_addr = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;

	wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
	wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
				   PIPE_CONTROL_GLOBAL_GTT_IVB |
				   PIPE_CONTROL_CS_STALL |
				   PIPE_CONTROL_QW_WRITE));
	wa_ctx_emit(batch, index, scratch_addr);
	wa_ctx_emit(batch, index, 0);
	wa_ctx_emit(batch, index, 0);
	wa_ctx_emit(batch, index, 0);

	/* Pad to end of cacheline */
	while (index % CACHELINE_DWORDS)
		wa_ctx_emit(batch, index, MI_NOOP);

	/*
	 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
	 * execution depends on the length specified in terms of cache lines
	 * in the register CTX_RCS_INDIRECT_CTX
	 */

	return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
}

/*
 *  This batch is started immediately after indirect_ctx batch. Since we ensure
 *  that indirect_ctx ends on a cacheline this batch is aligned automatically.
 *
 *  The number of DWORDs written is returned using this field.
 *
 *  This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding
 *  to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant.
 */
static int gen8_init_perctx_bb(struct intel_engine_cs *engine,
			       struct i915_wa_ctx_bb *wa_ctx,
			       uint32_t *batch,
			       uint32_t *offset)
{
	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);

	/* WaDisableCtxRestoreArbitration:bdw,chv */
	wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);

	wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);

	return wa_ctx_end(wa_ctx, *offset = index, 1);
}

static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
				    struct i915_wa_ctx_bb *wa_ctx,
				    uint32_t *batch,
				    uint32_t *offset)
{
	int ret;
	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);

	/* WaDisableCtxRestoreArbitration:skl,bxt */
	if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
	    IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
		wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);

	/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
	ret = gen8_emit_flush_coherentl3_wa(engine, batch, index);
	if (ret < 0)
		return ret;
	index = ret;

	/* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl */
	wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
	wa_ctx_emit_reg(batch, index, COMMON_SLICE_CHICKEN2);
	wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(
			    GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE));
	wa_ctx_emit(batch, index, MI_NOOP);

	/* WaClearSlmSpaceAtContextSwitch:kbl */
	/* Actual scratch location is at 128 bytes offset */
	if (IS_KBL_REVID(engine->i915, 0, KBL_REVID_A0)) {
		uint32_t scratch_addr
			= engine->scratch.gtt_offset + 2*CACHELINE_BYTES;

		wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
		wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
					   PIPE_CONTROL_GLOBAL_GTT_IVB |
					   PIPE_CONTROL_CS_STALL |
					   PIPE_CONTROL_QW_WRITE));
		wa_ctx_emit(batch, index, scratch_addr);
		wa_ctx_emit(batch, index, 0);
		wa_ctx_emit(batch, index, 0);
		wa_ctx_emit(batch, index, 0);
	}

	/* WaMediaPoolStateCmdInWABB:bxt */
	if (HAS_POOLED_EU(engine->i915)) {
		/*
		 * EU pool configuration is setup along with golden context
		 * during context initialization. This value depends on
		 * device type (2x6 or 3x6) and needs to be updated based
		 * on which subslice is disabled especially for 2x6
		 * devices, however it is safe to load default
		 * configuration of 3x6 device instead of masking off
		 * corresponding bits because HW ignores bits of a disabled
		 * subslice and drops down to appropriate config. Please
		 * see render_state_setup() in i915_gem_render_state.c for
		 * possible configurations, to avoid duplication they are
		 * not shown here again.
		 */
		u32 eu_pool_config = 0x00777000;
		wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_STATE);
		wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_ENABLE);
		wa_ctx_emit(batch, index, eu_pool_config);
		wa_ctx_emit(batch, index, 0);
		wa_ctx_emit(batch, index, 0);
		wa_ctx_emit(batch, index, 0);
	}

	/* Pad to end of cacheline */
	while (index % CACHELINE_DWORDS)
		wa_ctx_emit(batch, index, MI_NOOP);

	return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
}

static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
			       struct i915_wa_ctx_bb *wa_ctx,
			       uint32_t *batch,
			       uint32_t *offset)
{
	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);

	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
	if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_B0) ||
	    IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
		wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
		wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
		wa_ctx_emit(batch, index,
			    _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING));
		wa_ctx_emit(batch, index, MI_NOOP);
	}

	/* WaClearTdlStateAckDirtyBits:bxt */
	if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_B0)) {
		wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4));

		wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK);
		wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));

		wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE1);
		wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));

		wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE2);
		wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));

		wa_ctx_emit_reg(batch, index, GEN7_ROW_CHICKEN2);
		/* dummy write to CS, mask bits are 0 to ensure the register is not modified */
		wa_ctx_emit(batch, index, 0x0);
		wa_ctx_emit(batch, index, MI_NOOP);
	}

	/* WaDisableCtxRestoreArbitration:skl,bxt */
	if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
	    IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
		wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);

	wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);

	return wa_ctx_end(wa_ctx, *offset = index, 1);
}

static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
{
	int ret;

	engine->wa_ctx.obj = i915_gem_object_create(&engine->i915->drm,
						    PAGE_ALIGN(size));
	if (IS_ERR(engine->wa_ctx.obj)) {
		DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
		ret = PTR_ERR(engine->wa_ctx.obj);
		engine->wa_ctx.obj = NULL;
		return ret;
	}

	ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
	if (ret) {
		DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
				 ret);
		i915_gem_object_put(engine->wa_ctx.obj);
		return ret;
	}

	return 0;
}

static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine)
{
	if (engine->wa_ctx.obj) {
		i915_gem_object_ggtt_unpin(engine->wa_ctx.obj);
		i915_gem_object_put(engine->wa_ctx.obj);
		engine->wa_ctx.obj = NULL;
	}
}

static int intel_init_workaround_bb(struct intel_engine_cs *engine)
{
	int ret;
	uint32_t *batch;
	uint32_t offset;
	struct page *page;
	struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;

	WARN_ON(engine->id != RCS);

	/* update this when WA for higher Gen are added */
	if (INTEL_GEN(engine->i915) > 9) {
		DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
			  INTEL_GEN(engine->i915));
		return 0;
	}

	/* some WA perform writes to scratch page, ensure it is valid */
	if (engine->scratch.obj == NULL) {
		DRM_ERROR("scratch page not allocated for %s\n", engine->name);
		return -EINVAL;
	}

	ret = lrc_setup_wa_ctx_obj(engine, PAGE_SIZE);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
		return ret;
	}

	page = i915_gem_object_get_dirty_page(wa_ctx->obj, 0);
	batch = kmap_atomic(page);
	offset = 0;

	if (IS_GEN8(engine->i915)) {
		ret = gen8_init_indirectctx_bb(engine,
					       &wa_ctx->indirect_ctx,
					       batch,
					       &offset);
		if (ret)
			goto out;

		ret = gen8_init_perctx_bb(engine,
					  &wa_ctx->per_ctx,
					  batch,
					  &offset);
		if (ret)
			goto out;
	} else if (IS_GEN9(engine->i915)) {
		ret = gen9_init_indirectctx_bb(engine,
					       &wa_ctx->indirect_ctx,
					       batch,
					       &offset);
		if (ret)
			goto out;

		ret = gen9_init_perctx_bb(engine,
					  &wa_ctx->per_ctx,
					  batch,
					  &offset);
		if (ret)
			goto out;
	}

out:
	kunmap_atomic(batch);
	if (ret)
		lrc_destroy_wa_ctx_obj(engine);

	return ret;
}

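/*
 * Point the engine at its Hardware Status Page so that seqno breadcrumbs
 * and other per-engine status writes land in the right place.
 */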
static void lrc_init_hws(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE(RING_HWS_PGA(engine->mmio_base),
		   (u32)engine->status_page.gfx_addr);
	POSTING_READ(RING_HWS_PGA(engine->mmio_base));
}

static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	unsigned int next_context_status_buffer_hw;

	lrc_init_hws(engine);

	I915_WRITE_IMR(engine,
		       ~(engine->irq_enable_mask | engine->irq_keep_mask));
	I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);

	I915_WRITE(RING_MODE_GEN7(engine),
		   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
		   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
	POSTING_READ(RING_MODE_GEN7(engine));

	/*
	 * Instead of resetting the Context Status Buffer (CSB) read pointer to
	 * zero, we need to read the write pointer from hardware and use its
	 * value because "this register is power context save restored".
	 * Effectively, these states have been observed:
	 *
	 *      | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
	 * BDW  | CSB regs not reset       | CSB regs reset       |
	 * CHT  | CSB regs not reset       | CSB regs not reset   |
	 * SKL  |         ?                |         ?            |
	 * BXT  |         ?                |         ?            |
	 */
	next_context_status_buffer_hw =
		GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine)));

	/*
	 * When the CSB registers are reset (also after power-up / gpu reset),
	 * CSB write pointer is set to all 1's, which is not valid, use '5' in
	 * this special case, so the first element read is CSB[0].
	 */
	if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
		next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);

	engine->next_context_status_buffer = next_context_status_buffer_hw;
	DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);

	intel_engine_init_hangcheck(engine);

	return intel_mocs_init_engine(engine);
}

static int gen8_init_render_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen8_init_common_ring(engine);
	if (ret)
		return ret;

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
	 */
	I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	return init_workarounds_ring(engine);
}

static int gen9_init_render_ring(struct intel_engine_cs *engine)
{
	int ret;

	ret = gen8_init_common_ring(engine);
	if (ret)
		return ret;

	return init_workarounds_ring(engine);
}

static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
{
	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
	struct intel_ring *ring = req->ring;
	struct intel_engine_cs *engine = req->engine;
	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
	int i, ret;

	ret = intel_ring_begin(req, num_lri_cmds * 2 + 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_lri_cmds));
	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);

		intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, i));
		intel_ring_emit(ring, upper_32_bits(pd_daddr));
		intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, i));
		intel_ring_emit(ring, lower_32_bits(pd_daddr));
	}

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
			      u64 offset, u32 len,
			      unsigned int dispatch_flags)
{
	struct intel_ring *ring = req->ring;
	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
	int ret;

	/* Don't rely on hw updating PDPs, especially in lite-restore.
	 * Ideally, we should set Force PD Restore in ctx descriptor,
	 * but we can't. Force Restore would be a second option, but
	 * it is unsafe in case of lite-restore (because the ctx is
	 * not idle). PML4 is allocated during ppgtt init so this is
	 * not needed in 48-bit mode. */
	if (req->ctx->ppgtt &&
	    (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
		if (!USES_FULL_48BIT_PPGTT(req->i915) &&
		    !intel_vgpu_active(req->i915)) {
			ret = intel_logical_ring_emit_pdps(req);
			if (ret)
				return ret;
		}

		req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
	}

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	/* FIXME(BDW): Address space and security selectors. */
	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 |
			(ppgtt<<8) |
			(dispatch_flags & I915_DISPATCH_RS ?
			 MI_BATCH_RESOURCE_STREAMER : 0));
	intel_ring_emit(ring, lower_32_bits(offset));
	intel_ring_emit(ring, upper_32_bits(offset));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	I915_WRITE_IMR(engine,
		       ~(engine->irq_enable_mask | engine->irq_keep_mask));
	POSTING_READ_FW(RING_IMR(engine->mmio_base));
}

static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
}

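/*
 * Flush for the non-render engines: a MI_FLUSH_DW with a post-sync write to
 * the scratch slot of the HWSP acts as the command barrier, plus TLB (and,
 * on the VCS, BSD) invalidation when EMIT_INVALIDATE is requested.
 */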
static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
{
	struct intel_ring *ring = request->ring;
	u32 cmd;
	int ret;

	ret = intel_ring_begin(request, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW + 1;

	/* We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	if (mode & EMIT_INVALIDATE) {
		cmd |= MI_INVALIDATE_TLB;
		if (request->engine->id == VCS)
			cmd |= MI_INVALIDATE_BSD;
	}

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring,
			I915_GEM_HWS_SCRATCH_ADDR |
			MI_FLUSH_DW_USE_GTT);
	intel_ring_emit(ring, 0); /* upper addr */
	intel_ring_emit(ring, 0); /* value */
	intel_ring_advance(ring);

	return 0;
}

static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
				  u32 mode)
{
	struct intel_ring *ring = request->ring;
	struct intel_engine_cs *engine = request->engine;
	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	bool vf_flush_wa = false, dc_flush_wa = false;
	u32 flags = 0;
	int ret;
	int len;

	flags |= PIPE_CONTROL_CS_STALL;

	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}

	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		/*
		 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
		 * pipe control.
		 */
		if (IS_GEN9(request->i915))
			vf_flush_wa = true;

		/* WaForGAMHang:kbl */
		if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0))
			dc_flush_wa = true;
	}

	len = 6;

	if (vf_flush_wa)
		len += 6;

	if (dc_flush_wa)
		len += 12;

	ret = intel_ring_begin(request, len);
	if (ret)
		return ret;

	if (vf_flush_wa) {
		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, 0);
	}

	if (dc_flush_wa) {
		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
		intel_ring_emit(ring, PIPE_CONTROL_DC_FLUSH_ENABLE);
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, 0);
M
Mika Kuoppala 已提交
1577 1578
	}

1579 1580 1581 1582 1583 1584
	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
M
Mika Kuoppala 已提交
1585 1586

	if (dc_flush_wa) {
1587 1588 1589 1590 1591 1592
		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
		intel_ring_emit(ring, PIPE_CONTROL_CS_STALL);
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, 0);
M
Mika Kuoppala 已提交
1593 1594
	}

1595
	intel_ring_advance(ring);
1596 1597 1598 1599

	return 0;
}
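
/*
 * A sketch of the worst case emitted above (GEN9 vf_flush_wa plus the
 * WaForGAMHang dc_flush_wa on early KBL), matching the len = 6 + 6 + 12
 * computation:
 *
 *   PIPE_CONTROL (6 dw) - empty, the NULL flush required before
 *                         VF_CACHE_INVALIDATE
 *   PIPE_CONTROL (6 dw) - PIPE_CONTROL_DC_FLUSH_ENABLE only
 *   PIPE_CONTROL (6 dw) - the real flush with the accumulated flags and
 *                         scratch_addr as the post-sync address
 *   PIPE_CONTROL (6 dw) - PIPE_CONTROL_CS_STALL only, closing the
 *                         dc_flush_wa bracket
 */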

static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
{
	/*
	 * On BXT A steppings there is a HW coherency issue whereby the
	 * MI_STORE_DATA_IMM storing the completed request's seqno
	 * occasionally doesn't invalidate the CPU cache. Work around this by
	 * clflushing the corresponding cacheline whenever the caller wants
	 * the coherency to be guaranteed. Note that this cacheline is known
	 * to be clean at this point, since we only write it in
	 * bxt_a_set_seqno(), where we also do a clflush after the write. So
	 * this clflush in practice becomes an invalidate operation.
	 */
	intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
}

/*
 * Reserve space for 2 NOOPs at the end of each request to be
 * used as a workaround for not being allowed to do lite
 * restore with HEAD==TAIL (WaIdleLiteRestore).
 */
#define WA_TAIL_DWORDS 2

static int gen8_emit_request(struct drm_i915_gem_request *request)
{
	struct intel_ring *ring = request->ring;
	int ret;

	ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
	if (ret)
		return ret;

	/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
	BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));

	intel_ring_emit(ring, (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
	intel_ring_emit(ring,
			intel_hws_seqno_address(request->engine) |
			MI_FLUSH_DW_USE_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, request->fence.seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_emit(ring, MI_NOOP);
	return intel_logical_ring_advance(request);
}
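
/*
 * Illustrative layout of the breadcrumb emitted above (6 dwords, with
 * WA_TAIL_DWORDS reserved on top for the WaIdleLiteRestore padding):
 *
 *   MI_FLUSH_DW w/ post-sync STOREDW -> writes request->fence.seqno into
 *                                       the HWS at intel_hws_seqno_address()
 *   MI_USER_INTERRUPT                -> notifies the CPU that the seqno
 *                                       has landed
 *   MI_NOOP                          -> padding to an even dword count
 */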

static int gen8_emit_request_render(struct drm_i915_gem_request *request)
{
	struct intel_ring *ring = request->ring;
	int ret;

	ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
	if (ret)
		return ret;

	/* We're using qword write, seqno should be aligned to 8 bytes. */
	BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);

	/* w/a: for post-sync ops following a GPGPU operation we
	 * need a prior CS_STALL, which is emitted by the flush
	 * following the batch.
	 */
	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
	intel_ring_emit(ring,
			(PIPE_CONTROL_GLOBAL_GTT_IVB |
			 PIPE_CONTROL_CS_STALL |
			 PIPE_CONTROL_QW_WRITE));
	intel_ring_emit(ring, intel_hws_seqno_address(request->engine));
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, i915_gem_request_get_seqno(request));
	/* We're thrashing one dword of HWS. */
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_emit(ring, MI_NOOP);
	return intel_logical_ring_advance(request);
}
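
/*
 * Note on the render variant above: the seqno write is done with a qword
 * PIPE_CONTROL post-sync op (hence the 8-dword budget instead of 6), so the
 * dword following the seqno in the HWS is overwritten with zero - that is
 * the "thrashing one dword of HWS" mentioned in the comment.
 */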

static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
{
	int ret;

	ret = intel_logical_ring_workarounds_emit(req);
	if (ret)
		return ret;

	ret = intel_rcs_context_init_mocs(req);
	/*
	 * Failing to program the MOCS is non-fatal. The system will not
	 * run at peak performance. So generate an error and carry on.
	 */
	if (ret)
		DRM_ERROR("MOCS failed to program: expect performance issues.\n");

	return i915_gem_render_state_init(req);
}

/**
 * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
 * @engine: Engine Command Streamer.
 */
void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv;

	if (!intel_engine_initialized(engine))
		return;

	/*
	 * Tasklet cannot be active at this point due to intel_mark_active/idle
	 * so this is just for documentation.
	 */
	if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
		tasklet_kill(&engine->irq_tasklet);

	dev_priv = engine->i915;

	if (engine->buffer) {
		intel_logical_ring_stop(engine);
		WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
	}

	if (engine->cleanup)
		engine->cleanup(engine);

	intel_engine_cleanup_cmd_parser(engine);
	i915_gem_batch_pool_fini(&engine->batch_pool);

	intel_engine_fini_breadcrumbs(engine);

	if (engine->status_page.obj) {
		i915_gem_object_unpin_map(engine->status_page.obj);
		engine->status_page.obj = NULL;
	}
	intel_lr_context_unpin(dev_priv->kernel_context, engine);

	engine->idle_lite_restore_wa = 0;
	engine->disable_lite_restore_wa = false;
	engine->ctx_desc_template = 0;

	lrc_destroy_wa_ctx_obj(engine);
	engine->i915 = NULL;
}

void intel_execlists_enable_submission(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv)
		engine->submit_request = execlists_submit_request;
}

static void
logical_ring_default_vfuncs(struct intel_engine_cs *engine)
{
	/* Default vfuncs which can be overridden by each engine. */
	engine->init_hw = gen8_init_common_ring;
	engine->emit_flush = gen8_emit_flush;
	engine->emit_request = gen8_emit_request;
	engine->submit_request = execlists_submit_request;

	engine->irq_enable = gen8_logical_ring_enable_irq;
	engine->irq_disable = gen8_logical_ring_disable_irq;
	engine->emit_bb_start = gen8_emit_bb_start;
	if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
		engine->irq_seqno_barrier = bxt_a_seqno_barrier;
}

static inline void
logical_ring_default_irqs(struct intel_engine_cs *engine)
{
	unsigned shift = engine->irq_shift;

	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
	engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
}
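
/*
 * Example (purely illustrative shift value): an engine whose irq_shift is 8
 * would get GT_RENDER_USER_INTERRUPT << 8 as its user-interrupt enable mask,
 * while GT_CONTEXT_SWITCH_INTERRUPT << 8 stays unmasked even when user
 * interrupts are disabled (see gen8_logical_ring_disable_irq() above), so
 * execlists context-switch interrupts are never lost.
 */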

static int
lrc_setup_hws(struct intel_engine_cs *engine,
	      struct drm_i915_gem_object *dctx_obj)
{
	void *hws;

	/* The HWSP is part of the default context object in LRC mode. */
	engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) +
				       LRC_PPHWSP_PN * PAGE_SIZE;
	hws = i915_gem_object_pin_map(dctx_obj);
	if (IS_ERR(hws))
		return PTR_ERR(hws);
	engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE;
	engine->status_page.obj = dctx_obj;

	return 0;
}

static void
logical_ring_setup(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	enum forcewake_domains fw_domains;

	intel_engine_setup_common(engine);

	/* Intentionally left blank. */
	engine->buffer = NULL;

	fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
						    RING_ELSP(engine),
						    FW_REG_WRITE);

	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
						     RING_CONTEXT_STATUS_PTR(engine),
						     FW_REG_READ | FW_REG_WRITE);

	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
						     RING_CONTEXT_STATUS_BUF_BASE(engine),
						     FW_REG_READ);

	engine->fw_domains = fw_domains;

	tasklet_init(&engine->irq_tasklet,
		     intel_lrc_irq_handler, (unsigned long)engine);

	logical_ring_init_platform_invariants(engine);
	logical_ring_default_vfuncs(engine);
	logical_ring_default_irqs(engine);
}

static int
logical_ring_init(struct intel_engine_cs *engine)
{
	struct i915_gem_context *dctx = engine->i915->kernel_context;
	int ret;

	ret = intel_engine_init_common(engine);
	if (ret)
		goto error;

	ret = execlists_context_deferred_alloc(dctx, engine);
	if (ret)
		goto error;

	/* As this is the default context, always pin it */
	ret = intel_lr_context_pin(dctx, engine);
	if (ret) {
		DRM_ERROR("Failed to pin context for %s: %d\n",
			  engine->name, ret);
		goto error;
	}

	/* And setup the hardware status page. */
	ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
	if (ret) {
		DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
		goto error;
	}

	return 0;

error:
	intel_logical_ring_cleanup(engine);
	return ret;
}

int logical_render_ring_init(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	logical_ring_setup(engine);

	if (HAS_L3_DPF(dev_priv))
		engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	/* Override some for render ring. */
	if (INTEL_GEN(dev_priv) >= 9)
		engine->init_hw = gen9_init_render_ring;
	else
		engine->init_hw = gen8_init_render_ring;
	engine->init_context = gen8_init_rcs_context;
	engine->cleanup = intel_fini_pipe_control;
	engine->emit_flush = gen8_emit_flush_render;
	engine->emit_request = gen8_emit_request_render;

	ret = intel_init_pipe_control(engine, 4096);
	if (ret)
		return ret;

	ret = intel_init_workaround_bb(engine);
	if (ret) {
		/*
		 * We continue even if we fail to initialize WA batch
		 * because we only expect rare glitches but nothing
		 * critical to prevent us from using GPU
		 */
		DRM_ERROR("WA batch buffer initialization failed: %d\n",
			  ret);
	}

	ret = logical_ring_init(engine);
	if (ret) {
		lrc_destroy_wa_ctx_obj(engine);
	}

	return ret;
}

int logical_xcs_ring_init(struct intel_engine_cs *engine)
{
	logical_ring_setup(engine);

	return logical_ring_init(engine);
}

static u32
make_rpcs(struct drm_i915_private *dev_priv)
{
	u32 rpcs = 0;

	/*
	 * No explicit RPCS request is needed to ensure full
	 * slice/subslice/EU enablement prior to Gen9.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 0;

	/*
	 * Starting in Gen9, render power gating can leave
	 * slice/subslice/EU in a partially enabled state. We
	 * must make an explicit request through RPCS for full
	 * enablement.
	 */
	if (INTEL_INFO(dev_priv)->has_slice_pg) {
		rpcs |= GEN8_RPCS_S_CNT_ENABLE;
		rpcs |= INTEL_INFO(dev_priv)->slice_total <<
			GEN8_RPCS_S_CNT_SHIFT;
		rpcs |= GEN8_RPCS_ENABLE;
	}

	if (INTEL_INFO(dev_priv)->has_subslice_pg) {
		rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
		rpcs |= INTEL_INFO(dev_priv)->subslice_per_slice <<
			GEN8_RPCS_SS_CNT_SHIFT;
		rpcs |= GEN8_RPCS_ENABLE;
	}

	if (INTEL_INFO(dev_priv)->has_eu_pg) {
		rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
			GEN8_RPCS_EU_MIN_SHIFT;
		rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
			GEN8_RPCS_EU_MAX_SHIFT;
		rpcs |= GEN8_RPCS_ENABLE;
	}

	return rpcs;
}
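
/*
 * Worked example (hypothetical SKU, values for illustration only): a Gen9
 * part with slice, subslice and EU power gating, 1 slice, 3 subslices per
 * slice and 8 EUs per subslice would get
 *
 *   rpcs = GEN8_RPCS_ENABLE |
 *          GEN8_RPCS_S_CNT_ENABLE  | (1 << GEN8_RPCS_S_CNT_SHIFT) |
 *          GEN8_RPCS_SS_CNT_ENABLE | (3 << GEN8_RPCS_SS_CNT_SHIFT) |
 *          (8 << GEN8_RPCS_EU_MIN_SHIFT) | (8 << GEN8_RPCS_EU_MAX_SHIFT)
 *
 * i.e. an explicit request for everything the hardware reports as present.
 */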

static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
{
	u32 indirect_ctx_offset;

	switch (INTEL_GEN(engine->i915)) {
	default:
		MISSING_CASE(INTEL_GEN(engine->i915));
		/* fall through */
	case 9:
		indirect_ctx_offset =
			GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
		break;
	case 8:
		indirect_ctx_offset =
			GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
		break;
	}

	return indirect_ctx_offset;
}

static int
populate_lr_context(struct i915_gem_context *ctx,
		    struct drm_i915_gem_object *ctx_obj,
		    struct intel_engine_cs *engine,
		    struct intel_ring *ring)
{
	struct drm_i915_private *dev_priv = ctx->i915;
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
	void *vaddr;
	u32 *reg_state;
	int ret;

	if (!ppgtt)
		ppgtt = dev_priv->mm.aliasing_ppgtt;

	ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
	if (ret) {
		DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
		return ret;
	}

	vaddr = i915_gem_object_pin_map(ctx_obj);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
		return ret;
	}
	ctx_obj->dirty = true;

	/* The second page of the context object contains some fields which must
	 * be set up prior to the first execution. */
	reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;

	/* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
	 * commands followed by (reg, value) pairs. The values we are setting here are
	 * only for the first context restore: on a subsequent save, the GPU will
	 * recreate this batchbuffer with new values (including all the missing
	 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
	reg_state[CTX_LRI_HEADER_0] =
		MI_LOAD_REGISTER_IMM(engine->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
	ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL,
		       RING_CONTEXT_CONTROL(engine),
		       _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
					  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
					  (HAS_RESOURCE_STREAMER(dev_priv) ?
					    CTX_CTRL_RS_CTX_ENABLE : 0)));
	ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base),
		       0);
	/* Ring buffer start address is not known until the buffer is pinned.
	 * It is written to the context image in execlists_update_context()
	 */
	ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START,
		       RING_START(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
		       RING_CTL(engine->mmio_base),
		       ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
		       RING_BBADDR_UDW(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
		       RING_BBADDR(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_BB_STATE,
		       RING_BBSTATE(engine->mmio_base),
		       RING_BB_PPGTT);
	ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U,
		       RING_SBBADDR_UDW(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L,
		       RING_SBBADDR(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE,
		       RING_SBBSTATE(engine->mmio_base), 0);
	if (engine->id == RCS) {
		ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR,
			       RING_BB_PER_CTX_PTR(engine->mmio_base), 0);
		ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX,
			       RING_INDIRECT_CTX(engine->mmio_base), 0);
		ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET,
			       RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0);
		if (engine->wa_ctx.obj) {
			struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
			uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);

			reg_state[CTX_RCS_INDIRECT_CTX+1] =
				(ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
				(wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);

			reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
				intel_lr_indirect_ctx_offset(engine) << 6;

			reg_state[CTX_BB_PER_CTX_PTR+1] =
				(ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
				0x01;
		}
	}
	reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
	ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP,
		       RING_CTX_TIMESTAMP(engine->mmio_base), 0);
	/* PDP values will be assigned later if needed */
	ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0),
		       0);

	if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
		/* 64b PPGTT (48bit canonical)
		 * PDP0_DESCRIPTOR contains the base address to PML4 and
		 * other PDP Descriptors are ignored.
		 */
		ASSIGN_CTX_PML4(ppgtt, reg_state);
	} else {
		/* 32b PPGTT
		 * PDP*_DESCRIPTOR contains the base address of space supported.
		 * With dynamic page allocation, PDPs may not be allocated at
		 * this point. Point the unallocated PDPs to the scratch page
		 */
		execlists_update_context_pdps(ppgtt, reg_state);
	}

	if (engine->id == RCS) {
		reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
		ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
			       make_rpcs(dev_priv));
	}

	i915_gem_object_unpin_map(ctx_obj);

	return 0;
}
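
/*
 * Rough map of the register state written above (each block is one
 * MI_LOAD_REGISTER_IMM followed by reg/value pairs):
 *
 *   CTX_LRI_HEADER_0: 14 regs on RCS, 11 otherwise - context control, ring
 *                     head/tail/start/ctl, batch buffer and second-level
 *                     batch buffer registers, plus the per-context and
 *                     indirect context pointers on RCS
 *   CTX_LRI_HEADER_1: 9 regs - context timestamp and the four PDP
 *                     descriptor pairs (UDW/LDW)
 *   CTX_LRI_HEADER_2: RCS only, 1 reg - R_PWR_CLK_STATE from make_rpcs()
 */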

/**
 * intel_lr_context_size() - return the size of the context for an engine
 * @engine: which engine to find the context size for
 *
 * Each engine may require a different amount of space for a context image,
 * so when allocating (or copying) an image, this function can be used to
 * find the right size for the specific engine.
 *
 * Return: size (in bytes) of an engine-specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
 */
uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
{
	int ret = 0;

	WARN_ON(INTEL_GEN(engine->i915) < 8);

	switch (engine->id) {
	case RCS:
		if (INTEL_GEN(engine->i915) >= 9)
			ret = GEN9_LR_CONTEXT_RENDER_SIZE;
		else
			ret = GEN8_LR_CONTEXT_RENDER_SIZE;
		break;
	case VCS:
	case BCS:
	case VECS:
	case VCS2:
		ret = GEN8_LR_CONTEXT_OTHER_SIZE;
		break;
	}

	return ret;
}
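
/*
 * Typical use (see execlists_context_deferred_alloc() below): the caller
 * rounds this value up to a page boundary and then adds the extra
 * LRC_PPHWSP_PN page(s) shared between the driver and the GuC before
 * allocating the backing object.
 */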

static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
					    struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *ctx_obj;
	struct intel_context *ce = &ctx->engine[engine->id];
	uint32_t context_size;
	struct intel_ring *ring;
	int ret;

	WARN_ON(ce->state);

	context_size = round_up(intel_lr_context_size(engine), 4096);

	/* One extra page as the sharing data between driver and GuC */
	context_size += PAGE_SIZE * LRC_PPHWSP_PN;

	ctx_obj = i915_gem_object_create(&ctx->i915->drm, context_size);
	if (IS_ERR(ctx_obj)) {
		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
		return PTR_ERR(ctx_obj);
	}

	ring = intel_engine_create_ring(engine, ctx->ring_size);
	if (IS_ERR(ring)) {
		ret = PTR_ERR(ring);
		goto error_deref_obj;
	}

	ret = populate_lr_context(ctx, ctx_obj, engine, ring);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
		goto error_ring_free;
	}

	ce->ring = ring;
	ce->state = ctx_obj;
	ce->initialised = engine->init_context == NULL;

	return 0;

error_ring_free:
	intel_ring_free(ring);
error_deref_obj:
	i915_gem_object_put(ctx_obj);
	ce->ring = NULL;
	ce->state = NULL;
	return ret;
}
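
/*
 * Sketch of the resulting backing object (page indices come from the
 * LRC_*_PN defines elsewhere in the driver): the GuC/PPHWSP page(s) come
 * first, followed by the register state page written by
 * populate_lr_context(). lrc_setup_hws() above points the engine's status
 * page at the PPHWSP of the kernel context, while ce->ring and ce->state
 * record the per-engine ring and state object for this context.
 */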

void intel_lr_context_reset(struct drm_i915_private *dev_priv,
			    struct i915_gem_context *ctx)
{
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv) {
		struct intel_context *ce = &ctx->engine[engine->id];
		struct drm_i915_gem_object *ctx_obj = ce->state;
		void *vaddr;
		uint32_t *reg_state;

		if (!ctx_obj)
			continue;

		vaddr = i915_gem_object_pin_map(ctx_obj);
		if (WARN_ON(IS_ERR(vaddr)))
			continue;

		reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
		ctx_obj->dirty = true;

		reg_state[CTX_RING_HEAD+1] = 0;
		reg_state[CTX_RING_TAIL+1] = 0;

		i915_gem_object_unpin_map(ctx_obj);

		ce->ring->head = 0;
		ce->ring->tail = 0;
	}
}