/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <linux/circ_buf.h>
#include "i915_drv.h"
#include "intel_uc.h"

#include <trace/events/dma_fence.h>

/**
 * DOC: GuC-based command submission
 *
 * i915_guc_client:
 * We use the term client to avoid confusion with contexts. An i915_guc_client
 * is equivalent to the GuC object guc_context_desc. This context descriptor
 * is allocated from a pool of 1024 entries. The kernel driver allocates a
 * doorbell and a workqueue for it, as well as the process descriptor
 * (guc_process_desc), which is mapped to client space so that the client can
 * write a Work Item and then ring the doorbell.
 *
 * To simplify the implementation, we allocate one gem object that contains all
 * pages for doorbell, process descriptor and workqueue.
 *
 * The Scratch registers:
 * There are 16 MMIO-based registers starting at 0xC180. The kernel driver
 * writes a value to the action register (SOFT_SCRATCH_0) along with any data.
 * It then triggers an interrupt on the GuC via another register write
 * (0xC4C8). The firmware writes a success/fail code back to the action
 * register after processing the request. The kernel driver polls waiting for
 * this update and then proceeds.
 * See intel_guc_send()
 *
 * Doorbells:
 * Doorbells are interrupts to the uKernel. A doorbell is a single cache line
 * (QW) mapped into process space.
 *
 * Work Items:
 * There are several types of work items that the host may place into a
 * workqueue, each with its own requirements and limitations. Currently only
 * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
 * represents an in-order queue. The kernel driver packs the ring tail pointer
 * and an ELSP context descriptor dword into a Work Item.
 * See guc_wq_item_append()
 *
 */
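
/*
 * A rough sketch of the per-request flow implemented below (all names
 * are functions in this file):
 *
 *	i915_guc_wq_reserve(rq);	// ensure workqueue space, may -EAGAIN
 *	...
 *	__i915_guc_submit(rq);
 *	    guc_wq_item_append(client, rq);	// write the work item
 *	    guc_ring_doorbell(client);		// ring the doorbell
 */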

/*
 * Tell the GuC to allocate or deallocate a specific doorbell
 */

static int guc_allocate_doorbell(struct intel_guc *guc,
				 struct i915_guc_client *client)
{
	u32 action[] = {
		INTEL_GUC_ACTION_ALLOCATE_DOORBELL,
		client->ctx_index
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static int guc_release_doorbell(struct intel_guc *guc,
				struct i915_guc_client *client)
{
	u32 action[] = {
		INTEL_GUC_ACTION_DEALLOCATE_DOORBELL,
		client->ctx_index
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/*
 * Initialise, update, or clear doorbell data shared with the GuC
 *
 * These functions modify shared data and so need access to the mapped
 * client object which contains the page being used for the doorbell
 */

static int guc_update_doorbell_id(struct intel_guc *guc,
				  struct i915_guc_client *client,
				  u16 new_id)
{
	struct sg_table *sg = guc->ctx_pool_vma->pages;
	void *doorbell_bitmap = guc->doorbell_bitmap;
	struct guc_doorbell_info *doorbell;
	struct guc_context_desc desc;
	size_t len;

	doorbell = client->vaddr + client->doorbell_offset;

	if (client->doorbell_id != GUC_INVALID_DOORBELL_ID &&
	    test_bit(client->doorbell_id, doorbell_bitmap)) {
		/* Deactivate the old doorbell */
		doorbell->db_status = GUC_DOORBELL_DISABLED;
		(void)guc_release_doorbell(guc, client);
		__clear_bit(client->doorbell_id, doorbell_bitmap);
	}

	/* Update the GuC's idea of the doorbell ID */
	len = sg_pcopy_to_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
			     sizeof(desc) * client->ctx_index);
	if (len != sizeof(desc))
		return -EFAULT;
	desc.db_id = new_id;
	len = sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
			     sizeof(desc) * client->ctx_index);
	if (len != sizeof(desc))
		return -EFAULT;

	client->doorbell_id = new_id;
	if (new_id == GUC_INVALID_DOORBELL_ID)
		return 0;

	/* Activate the new doorbell */
	__set_bit(new_id, doorbell_bitmap);
	doorbell->db_status = GUC_DOORBELL_ENABLED;
	doorbell->cookie = client->doorbell_cookie;
	return guc_allocate_doorbell(guc, client);
}

static void guc_disable_doorbell(struct intel_guc *guc,
				 struct i915_guc_client *client)
{
	(void)guc_update_doorbell_id(guc, client, GUC_INVALID_DOORBELL_ID);

	/* XXX: wait for any interrupts */
	/* XXX: wait for workqueue to drain */
}

static uint16_t
select_doorbell_register(struct intel_guc *guc, uint32_t priority)
{
	/*
	 * The bitmap tracks which doorbell registers are currently in use.
	 * It is split into two halves; the first half is used for normal
	 * priority contexts, the second half for high-priority ones.
	 * Note that logically higher priorities are numerically less than
	 * normal ones, so the test below means "is it high-priority?"
	 */
	const bool hi_pri = (priority <= GUC_CTX_PRIORITY_HIGH);
	const uint16_t half = GUC_MAX_DOORBELLS / 2;
	const uint16_t start = hi_pri ? half : 0;
	const uint16_t end = start + half;
	uint16_t id;

	id = find_next_zero_bit(guc->doorbell_bitmap, end, start);
	if (id == end)
		id = GUC_INVALID_DOORBELL_ID;

	DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n",
			hi_pri ? "high" : "normal", id);

	return id;
}
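
/*
 * For example (assuming GUC_MAX_DOORBELLS is 256, as defined in
 * intel_guc_fwif.h): ids 0..127 would be handed out to normal-priority
 * clients and ids 128..255 to high-priority ones.
 */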

/*
 * Select, assign and release doorbell cachelines
 *
 * These functions track which doorbell cachelines are in use.
 * The data they manipulate is protected by the intel_guc_send lock.
 */

static uint32_t select_doorbell_cacheline(struct intel_guc *guc)
{
	const uint32_t cacheline_size = cache_line_size();
	uint32_t offset;

	/* Doorbell uses a single cache line within a page */
	offset = offset_in_page(guc->db_cacheline);

	/* Move to the next cache line to reduce contention */
	guc->db_cacheline += cacheline_size;

	DRM_DEBUG_DRIVER("selected doorbell cacheline 0x%x, next 0x%x, linesize %u\n",
			offset, guc->db_cacheline, cacheline_size);

	return offset;
}
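
/*
 * E.g. with 64-byte cache lines the offsets handed out cycle through
 * 0x000, 0x040, 0x080, ... 0xfc0 and then wrap back to 0x000, so
 * successive clients' doorbells tend to land on different cachelines
 * within their respective pages.
 */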

/*
 * Initialise the process descriptor shared with the GuC firmware.
 */
static void guc_proc_desc_init(struct intel_guc *guc,
			       struct i915_guc_client *client)
{
	struct guc_process_desc *desc;

	desc = client->vaddr + client->proc_desc_offset;

	memset(desc, 0, sizeof(*desc));

	/*
	 * XXX: pDoorbell and WQVBaseAddress are pointers in process address
	 * space for ring3 clients (set them as in mmap_ioctl) or kernel
	 * space for kernel clients (map on demand instead? May make debug
	 * easier to have it mapped).
	 */
	desc->wq_base_addr = 0;
	desc->db_base_addr = 0;

	desc->context_id = client->ctx_index;
	desc->wq_size_bytes = client->wq_size;
	desc->wq_status = WQ_STATUS_ACTIVE;
	desc->priority = client->priority;
}

/*
 * Initialise/clear the context descriptor shared with the GuC firmware.
 *
 * This descriptor tells the GuC where (in GGTT space) to find the important
 * data structures relating to this client (doorbell, process descriptor,
 * work queue, etc).
 */

static void guc_ctx_desc_init(struct intel_guc *guc,
			      struct i915_guc_client *client)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx = client->owner;
	struct guc_context_desc desc;
	struct sg_table *sg;
	unsigned int tmp;
	u32 gfx_addr;

	memset(&desc, 0, sizeof(desc));

	desc.attribute = GUC_CTX_DESC_ATTR_ACTIVE | GUC_CTX_DESC_ATTR_KERNEL;
	desc.context_id = client->ctx_index;
	desc.priority = client->priority;
	desc.db_id = client->doorbell_id;

	for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
		struct intel_context *ce = &ctx->engine[engine->id];
		uint32_t guc_engine_id = engine->guc_id;
		struct guc_execlist_context *lrc = &desc.lrc[guc_engine_id];

		/* TODO: We have a design issue to be solved here. Only when we
		 * receive the first batch do we know which engine the user is
		 * using, but at this point the GuC already expects the lrc and
		 * ring to be pinned. This is not an issue for the default
		 * context, which is currently the only owner of a GuC client,
		 * but any future owner of a GuC client must make sure the lrc
		 * is pinned before getting here.
		 */
		if (!ce->state)
			break;	/* XXX: continue? */

		lrc->context_desc = lower_32_bits(ce->lrc_desc);

		/* The state page is after PPHWSP */
		lrc->ring_lcra =
			guc_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
		lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
				(guc_engine_id << GUC_ELC_ENGINE_OFFSET);

		lrc->ring_begin = guc_ggtt_offset(ce->ring->vma);
		lrc->ring_end = lrc->ring_begin + ce->ring->size - 1;
		lrc->ring_next_free_location = lrc->ring_begin;
		lrc->ring_current_tail_pointer_value = 0;

		desc.engines_used |= (1 << guc_engine_id);
	}

	DRM_DEBUG_DRIVER("Host engines 0x%x => GuC engines used 0x%x\n",
			client->engines, desc.engines_used);
	WARN_ON(desc.engines_used == 0);

	/*
	 * The doorbell, process descriptor, and workqueue are all parts
	 * of the client object, which the GuC will reference via the GGTT
	 */
	gfx_addr = guc_ggtt_offset(client->vma);
	desc.db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
				client->doorbell_offset;
	desc.db_trigger_cpu =
		(uintptr_t)client->vaddr + client->doorbell_offset;
	desc.db_trigger_uk = gfx_addr + client->doorbell_offset;
	desc.process_desc = gfx_addr + client->proc_desc_offset;
	desc.wq_addr = gfx_addr + client->wq_offset;
	desc.wq_size = client->wq_size;

	/*
	 * XXX: Take LRCs from an existing context if this is not an
	 * IsKMDCreatedContext client
	 */
	desc.desc_private = (uintptr_t)client;

	/* Pool context is pinned already */
	sg = guc->ctx_pool_vma->pages;
	sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
			     sizeof(desc) * client->ctx_index);
}

static void guc_ctx_desc_fini(struct intel_guc *guc,
			      struct i915_guc_client *client)
{
	struct guc_context_desc desc;
	struct sg_table *sg;

	memset(&desc, 0, sizeof(desc));

	sg = guc->ctx_pool_vma->pages;
	sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
			     sizeof(desc) * client->ctx_index);
}

/**
 * i915_guc_wq_reserve() - reserve space in the GuC's workqueue
 * @request:	request associated with the commands
 *
 * Return:	0 if space is available
 *		-EAGAIN if space is not currently available
 *
 * This function must be called (and must return 0) before a request
 * is submitted to the GuC via i915_guc_submit() below. Once a result
 * of 0 has been returned, it must be balanced by a corresponding
 * call to submit().
 *
 * Reservation allows the caller to determine in advance that space
 * will be available for the next submission before committing resources
 * to it, and helps avoid late failures with complicated recovery paths.
 */
int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
{
	const size_t wqi_size = sizeof(struct guc_wq_item);
	struct i915_guc_client *client = request->i915->guc.execbuf_client;
	struct guc_process_desc *desc = client->vaddr +
					client->proc_desc_offset;
	u32 freespace;
	int ret;

	spin_lock_irq(&client->wq_lock);
	freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size);
	freespace -= client->wq_rsvd;
	if (likely(freespace >= wqi_size)) {
		client->wq_rsvd += wqi_size;
		ret = 0;
	} else {
		client->no_wq_space++;
		ret = -EAGAIN;
	}
	spin_unlock_irq(&client->wq_lock);

	return ret;
}
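
/*
 * Illustrative pairing (a sketch, not a real caller): every successful
 * reserve must later be balanced by exactly one submit, or undone with
 * i915_guc_wq_unreserve():
 *
 *	err = i915_guc_wq_reserve(rq);	// may be -EAGAIN if the wq is full
 *	if (!err)
 *		i915_guc_submit(rq);	// consumes the reservation
 */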

static void guc_client_update_wq_rsvd(struct i915_guc_client *client, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&client->wq_lock, flags);
	client->wq_rsvd += size;
	spin_unlock_irqrestore(&client->wq_lock, flags);
}

void i915_guc_wq_unreserve(struct drm_i915_gem_request *request)
{
	const int wqi_size = sizeof(struct guc_wq_item);
	struct i915_guc_client *client = request->i915->guc.execbuf_client;

	GEM_BUG_ON(READ_ONCE(client->wq_rsvd) < wqi_size);
	guc_client_update_wq_rsvd(client, -wqi_size);
}

/* Construct a Work Item and append it to the GuC's Work Queue */
static void guc_wq_item_append(struct i915_guc_client *client,
			       struct drm_i915_gem_request *rq)
{
	/* wqi_len is in DWords, and does not include the one-word header */
	const size_t wqi_size = sizeof(struct guc_wq_item);
	const u32 wqi_len = wqi_size/sizeof(u32) - 1;
	struct intel_engine_cs *engine = rq->engine;
	struct guc_process_desc *desc;
	struct guc_wq_item *wqi;
	u32 freespace, tail, wq_off;

	desc = client->vaddr + client->proc_desc_offset;

	/* Free space is guaranteed, see i915_guc_wq_reserve() above */
	freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size);
	GEM_BUG_ON(freespace < wqi_size);

	/* The GuC firmware wants the tail index in QWords, not bytes */
	tail = rq->tail;
	GEM_BUG_ON(tail & 7);
	tail >>= 3;
	GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);

	/* For now the workqueue item is 4 DWs and the workqueue buffer is
	 * 2 pages, so a wqi structure can neither straddle a page boundary
	 * nor wrap around to the beginning of the buffer. This simplifies
	 * the implementation below.
	 *
	 * XXX: if that ever changes, we would need to stage the data in a
	 * temporary wqi and copy it into the workqueue buffer dword by dword.
	 */
	BUILD_BUG_ON(wqi_size != 16);
	GEM_BUG_ON(client->wq_rsvd < wqi_size);

	/* postincrement WQ tail for next time */
	wq_off = client->wq_tail;
	GEM_BUG_ON(wq_off & (wqi_size - 1));
	client->wq_tail += wqi_size;
	client->wq_tail &= client->wq_size - 1;
	client->wq_rsvd -= wqi_size;

	/* WQ starts from the page after doorbell / process_desc */
	wqi = client->vaddr + wq_off + GUC_DB_SIZE;

	/* Now fill in the 4-word work queue item */
	wqi->header = WQ_TYPE_INORDER |
			(wqi_len << WQ_LEN_SHIFT) |
			(engine->guc_id << WQ_TARGET_SHIFT) |
			WQ_NO_WCFLUSH_WAIT;

	/* The GuC wants only the low-order word of the context descriptor */
	wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, engine);

	wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
	wqi->fence_id = rq->global_seqno;
}

static int guc_ring_doorbell(struct i915_guc_client *client)
{
	struct guc_process_desc *desc;
	union guc_doorbell_qw db_cmp, db_exc, db_ret;
	union guc_doorbell_qw *db;
	int attempt = 2, ret = -EAGAIN;

	desc = client->vaddr + client->proc_desc_offset;

	/* Update the tail so it is visible to GuC */
	desc->tail = client->wq_tail;

	/* current cookie */
	db_cmp.db_status = GUC_DOORBELL_ENABLED;
	db_cmp.cookie = client->doorbell_cookie;

	/* cookie to be updated */
	db_exc.db_status = GUC_DOORBELL_ENABLED;
	db_exc.cookie = client->doorbell_cookie + 1;
	if (db_exc.cookie == 0)
		db_exc.cookie = 1;

	/* pointer to the current doorbell cacheline */
	db = client->vaddr + client->doorbell_offset;

	while (attempt--) {
		/* let's ring the doorbell */
		db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db,
			db_cmp.value_qw, db_exc.value_qw);

		/* if the exchange was successfully executed */
		if (db_ret.value_qw == db_cmp.value_qw) {
			/* db was successfully rung */
			client->doorbell_cookie = db_exc.cookie;
			ret = 0;
			break;
		}

		/* XXX: doorbell was lost and needs to be acquired again */
		if (db_ret.db_status == GUC_DOORBELL_DISABLED)
			break;

		DRM_WARN("Cookie mismatch. Expected %d, found %d\n",
			 db_cmp.cookie, db_ret.cookie);

		/* update the cookie to the value newly read from the GuC */
		db_cmp.cookie = db_ret.cookie;
		db_exc.cookie = db_ret.cookie + 1;
		if (db_exc.cookie == 0)
			db_exc.cookie = 1;
	}

	return ret;
}
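
/*
 * Note: ringing the doorbell is a 64-bit cmpxchg on the doorbell
 * cacheline, swapping {GUC_DOORBELL_ENABLED, cookie} for
 * {GUC_DOORBELL_ENABLED, cookie + 1} (with 0 skipped); a mismatch means
 * the GuC updated the cookie behind our back, so we retry with the
 * value we read back.
 */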

/**
 * __i915_guc_submit() - Submit commands through GuC
 * @rq:		request associated with the commands
 *
 * The caller must have already called i915_guc_wq_reserve() above with
 * a result of 0 (success), guaranteeing that there is space in the work
 * queue for the new request, so enqueuing the item cannot fail.
 *
 * Bad Things Will Happen if the caller violates this protocol e.g. calls
 * submit() when _reserve() says there's no space, or calls _submit()
 * a different number of times from (successful) calls to _reserve().
 *
 * The only error here arises if the doorbell hardware isn't functioning
 * as expected, which really shouldn't happen.
 */
static void __i915_guc_submit(struct drm_i915_gem_request *rq)
{
	struct drm_i915_private *dev_priv = rq->i915;
	struct intel_engine_cs *engine = rq->engine;
	unsigned int engine_id = engine->id;
	struct intel_guc *guc = &rq->i915->guc;
	struct i915_guc_client *client = guc->execbuf_client;
	unsigned long flags;
	int b_ret;

	/* WA to flush out the pending GMADR writes to ring buffer. */
	if (i915_vma_is_map_and_fenceable(rq->ring->vma))
		POSTING_READ_FW(GUC_STATUS);

	spin_lock_irqsave(&client->wq_lock, flags);

	guc_wq_item_append(client, rq);
	b_ret = guc_ring_doorbell(client);

	client->submissions[engine_id] += 1;
	client->retcode = b_ret;
	if (b_ret)
		client->b_fail += 1;

	guc->submissions[engine_id] += 1;
	guc->last_seqno[engine_id] = rq->global_seqno;

	spin_unlock_irqrestore(&client->wq_lock, flags);
}

static void i915_guc_submit(struct drm_i915_gem_request *rq)
{
	__i915_gem_request_submit(rq);
	__i915_guc_submit(rq);
}

static void nested_enable_signaling(struct drm_i915_gem_request *rq)
{
	/* If we use dma_fence_enable_sw_signaling() directly, lockdep
	 * detects an ordering issue between the fence lockclass and the
	 * global_timeline. This circular dependency can only occur via 2
	 * different fences (but same fence lockclass), so we use the nesting
	 * annotation here to prevent the warn, equivalent to the nesting
	 * inside i915_gem_request_submit() for when we also enable the
	 * signaler.
	 */

	if (test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
			     &rq->fence.flags))
		return;

	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags));
	trace_dma_fence_enable_signal(&rq->fence);

	spin_lock_nested(&rq->lock, SINGLE_DEPTH_NESTING);
	intel_engine_enable_signaling(rq);
	spin_unlock(&rq->lock);
}

static bool i915_guc_dequeue(struct intel_engine_cs *engine)
{
	struct execlist_port *port = engine->execlist_port;
	struct drm_i915_gem_request *last = port[0].request;
	struct rb_node *rb;
	bool submit = false;

	/* After execlist_first is updated, the tasklet will be rescheduled.
	 *
	 * If we are currently running (inside the tasklet) and a third
	 * party queues a request and so updates engine->execlist_first under
	 * the spinlock (which we have elided), it will atomically set the
	 * TASKLET_SCHED flag, causing us to be re-executed and pick up
	 * the change in state (the update to TASKLET_SCHED incurs a memory
	 * barrier making this cross-cpu checking safe).
	 */
	if (!READ_ONCE(engine->execlist_first))
		return false;

	spin_lock_irq(&engine->timeline->lock);
	rb = engine->execlist_first;
	while (rb) {
		struct drm_i915_gem_request *rq =
			rb_entry(rb, typeof(*rq), priotree.node);

		if (last && rq->ctx != last->ctx) {
			if (port != engine->execlist_port)
				break;

			i915_gem_request_assign(&port->request, last);
			nested_enable_signaling(last);
			port++;
		}

		rb = rb_next(rb);
		rb_erase(&rq->priotree.node, &engine->execlist_queue);
		RB_CLEAR_NODE(&rq->priotree.node);
		rq->priotree.priority = INT_MAX;

		i915_guc_submit(rq);
		trace_i915_gem_request_in(rq, port - engine->execlist_port);
		last = rq;
		submit = true;
	}
	if (submit) {
		i915_gem_request_assign(&port->request, last);
		nested_enable_signaling(last);
		engine->execlist_first = rb;
	}
	spin_unlock_irq(&engine->timeline->lock);

	return submit;
}

static void i915_guc_irq_handler(unsigned long data)
{
	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
	struct execlist_port *port = engine->execlist_port;
	struct drm_i915_gem_request *rq;
	bool submit;

	do {
		rq = port[0].request;
		while (rq && i915_gem_request_completed(rq)) {
			trace_i915_gem_request_out(rq);
			i915_gem_request_put(rq);
			port[0].request = port[1].request;
			port[1].request = NULL;
			rq = port[0].request;
		}

		submit = false;
		if (!port[1].request)
			submit = i915_guc_dequeue(engine);
	} while (submit);
}
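
/*
 * Note: the GuC path reuses the execlists' two-element port array:
 * port[0] holds the request currently executing, port[1] the one queued
 * behind it. The handler above retires completed requests and then
 * refills the ports via i915_guc_dequeue().
 */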

/*
 * Everything below here is concerned with setup & teardown, and is
 * therefore not part of the somewhat time-critical batch-submission
 * path of i915_guc_submit() above.
 */

/**
 * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
 * @guc:	the guc
 * @size:	size of area to allocate (both virtual space and memory)
 *
 * This is a wrapper to create an object for use with the GuC. In order to
 * use it inside the GuC, an object needs to be pinned for its lifetime, so we
 * allocate both some backing storage and a range inside the Global GTT. We
 * must pin it in the GGTT somewhere other than [0, GUC_WOPCM_TOP) because
 * that range is reserved inside GuC.
 *
 * Return:	An i915_vma if successful, otherwise an ERR_PTR.
 */
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	obj = i915_gem_object_create(dev_priv, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
	if (IS_ERR(vma))
		goto err;

	ret = i915_vma_pin(vma, 0, PAGE_SIZE,
			   PIN_GLOBAL | PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	return vma;

err:
	i915_gem_object_put(obj);
	return vma;
}
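
/*
 * Within this file, intel_guc_allocate_vma() backs the per-client object
 * (GUC_DB_SIZE + GUC_WQ_SIZE), the context descriptor pool and the ADS;
 * the GuC log buffer is allocated the same way via intel_guc_log_create().
 */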

static void
guc_client_free(struct drm_i915_private *dev_priv,
		struct i915_guc_client *client)
{
	struct intel_guc *guc = &dev_priv->guc;

	if (!client)
		return;

	/*
	 * XXX: wait for any outstanding submissions before freeing memory.
	 * Be sure to drop any locks
	 */

	if (client->vaddr) {
		/*
		 * If we got as far as setting up a doorbell, make sure we
		 * shut it down before unmapping & deallocating the memory.
		 */
		guc_disable_doorbell(guc, client);

		i915_gem_object_unpin_map(client->vma->obj);
	}

	i915_vma_unpin_and_release(&client->vma);

	if (client->ctx_index != GUC_INVALID_CTX_ID) {
		guc_ctx_desc_fini(guc, client);
		ida_simple_remove(&guc->ctx_ids, client->ctx_index);
	}

	kfree(client);
}

/* Check that a doorbell register is in the expected state */
static bool guc_doorbell_check(struct intel_guc *guc, uint16_t db_id)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	i915_reg_t drbreg = GEN8_DRBREGL(db_id);
	uint32_t value = I915_READ(drbreg);
	bool enabled = (value & GUC_DOORBELL_ENABLED) != 0;
	bool expected = test_bit(db_id, guc->doorbell_bitmap);

	if (enabled == expected)
		return true;

	DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) 0x%x, should be %s\n",
			 db_id, drbreg.reg, value,
			 expected ? "active" : "inactive");

	return false;
}

/*
 * Borrow the first client to set up & tear down each unused doorbell
 * in turn, to ensure that all doorbell h/w is (re)initialised.
 */
static void guc_init_doorbell_hw(struct intel_guc *guc)
{
	struct i915_guc_client *client = guc->execbuf_client;
	uint16_t db_id;
	int i, err;

	guc_disable_doorbell(guc, client);

	for (i = 0; i < GUC_MAX_DOORBELLS; ++i) {
		/* Skip if doorbell is OK */
		if (guc_doorbell_check(guc, i))
			continue;

		err = guc_update_doorbell_id(guc, client, i);
		if (err)
			DRM_DEBUG_DRIVER("Doorbell %d update failed, err %d\n",
					i, err);
	}

	db_id = select_doorbell_register(guc, client->priority);
	WARN_ON(db_id == GUC_INVALID_DOORBELL_ID);

	err = guc_update_doorbell_id(guc, client, db_id);
	if (err)
		DRM_WARN("Failed to restore doorbell to %d, err %d\n",
			 db_id, err);
	/* Read back & verify all doorbell registers */
	for (i = 0; i < GUC_MAX_DOORBELLS; ++i)
		(void)guc_doorbell_check(guc, i);
}

/**
 * guc_client_alloc() - Allocate an i915_guc_client
 * @dev_priv:	driver private data structure
 * @engines:	The set of engines to enable for this client
 * @priority:	one of four priority levels: _CRITICAL, _HIGH, _NORMAL and
 * 		_LOW. The kernel client that replaces ExecList submission is
 * 		created with NORMAL priority. A client used by the scheduler
 * 		can be HIGH priority, while a preemption context can use
 * 		CRITICAL.
 * @ctx:	the context that owns the client (we use the default render
 * 		context)
 *
 * Return:	An i915_guc_client object on success, else NULL.
 */
static struct i915_guc_client *
guc_client_alloc(struct drm_i915_private *dev_priv,
		 uint32_t engines,
		 uint32_t priority,
		 struct i915_gem_context *ctx)
{
	struct i915_guc_client *client;
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_vma *vma;
	void *vaddr;
	uint16_t db_id;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return NULL;

	client->owner = ctx;
	client->guc = guc;
	client->engines = engines;
	client->priority = priority;
	client->doorbell_id = GUC_INVALID_DOORBELL_ID;

	client->ctx_index = (uint32_t)ida_simple_get(&guc->ctx_ids, 0,
			GUC_MAX_GPU_CONTEXTS, GFP_KERNEL);
	if (client->ctx_index >= GUC_MAX_GPU_CONTEXTS) {
		client->ctx_index = GUC_INVALID_CTX_ID;
		goto err;
	}

	/* The first page is doorbell/proc_desc. The following two pages are wq. */
	vma = intel_guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
	if (IS_ERR(vma))
		goto err;

	/* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
	client->vma = vma;

	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		goto err;

	client->vaddr = vaddr;

	spin_lock_init(&client->wq_lock);
	client->wq_offset = GUC_DB_SIZE;
	client->wq_size = GUC_WQ_SIZE;

	db_id = select_doorbell_register(guc, client->priority);
	if (db_id == GUC_INVALID_DOORBELL_ID)
		/* XXX: evict a doorbell instead? */
		goto err;

	client->doorbell_offset = select_doorbell_cacheline(guc);

	/*
	 * Since the doorbell only requires a single cacheline, we can save
	 * space by putting the application process descriptor in the same
	 * page. Use the half of the page that doesn't include the doorbell.
	 */
	if (client->doorbell_offset >= (GUC_DB_SIZE / 2))
		client->proc_desc_offset = 0;
	else
		client->proc_desc_offset = (GUC_DB_SIZE / 2);

	guc_proc_desc_init(guc, client);
	guc_ctx_desc_init(guc, client);

	/* For runtime client allocation we need to enable the doorbell. Not
	 * required yet for the static execbuf_client as this special kernel
	 * client is enabled from i915_guc_submission_enable().
	 *
	 * guc_update_doorbell_id(guc, client, db_id);
	 */
	DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: ctx_index %u\n",
		priority, client, client->engines, client->ctx_index);
	DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%x\n",
		client->doorbell_id, client->doorbell_offset);

	return client;

err:
	guc_client_free(dev_priv, client);
	return NULL;
}

static void guc_policies_init(struct guc_policies *policies)
{
	struct guc_policy *policy;
	u32 p, i;

	policies->dpc_promote_time = 500000;
	policies->max_num_work_items = POLICY_MAX_NUM_WI;

	for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++) {
		for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
			policy = &policies->policy[p][i];

			policy->execution_quantum = 1000000;
			policy->preemption_time = 500000;
			policy->fault_time = 250000;
			policy->policy_flags = 0;
		}
	}

	policies->is_valid = 1;
}

static void guc_addon_create(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct i915_vma *vma;
	struct page *page;
	/* The ads obj includes the struct itself and buffers passed to GuC */
	struct {
		struct guc_ads ads;
		struct guc_policies policies;
		struct guc_mmio_reg_state reg_state;
		u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE];
	} __packed *blob;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 base;

	vma = guc->ads_vma;
	if (!vma) {
		vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(sizeof(*blob)));
		if (IS_ERR(vma))
			return;

		guc->ads_vma = vma;
	}

	page = i915_vma_first_page(vma);
	blob = kmap(page);

	/* GuC scheduling policies */
	guc_policies_init(&blob->policies);

	/* MMIO reg state */
	for_each_engine(engine, dev_priv, id) {
		blob->reg_state.mmio_white_list[engine->guc_id].mmio_start =
			engine->mmio_base + GUC_MMIO_WHITE_LIST_START;

		/* Nothing to be saved or restored for now. */
		blob->reg_state.mmio_white_list[engine->guc_id].count = 0;
	}

	/*
	 * The GuC requires a "Golden Context" when it reinitialises
	 * engines after a reset. Here we use the Render ring default
	 * context, which must already exist and be pinned in the GGTT,
	 * so its address won't change after we've told the GuC where
	 * to find it.
	 */
	blob->ads.golden_context_lrca =
		dev_priv->engine[RCS]->status_page.ggtt_offset;

	for_each_engine(engine, dev_priv, id)
		blob->ads.eng_state_size[engine->guc_id] =
			intel_lr_context_size(engine);
	base = guc_ggtt_offset(vma);
	blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
	blob->ads.reg_state_buffer = base + ptr_offset(blob, reg_state_buffer);
	blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state);
	kunmap(page);
}

/*
 * Set up the memory resources to be shared with the GuC.  At this point,
 * we require just one object that can be mapped through the GGTT.
 */
int i915_guc_submission_init(struct drm_i915_private *dev_priv)
{
	const size_t ctxsize = sizeof(struct guc_context_desc);
	const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize;
	const size_t gemsize = round_up(poolsize, PAGE_SIZE);
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_vma *vma;

	if (!HAS_GUC_SCHED(dev_priv))
		return 0;

	/* Wipe bitmap & delete client in case of reinitialisation */
	bitmap_clear(guc->doorbell_bitmap, 0, GUC_MAX_DOORBELLS);
	i915_guc_submission_disable(dev_priv);

	if (!i915.enable_guc_submission)
		return 0; /* not enabled  */

	if (guc->ctx_pool_vma)
		return 0; /* already allocated */

	vma = intel_guc_allocate_vma(guc, gemsize);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	guc->ctx_pool_vma = vma;
	ida_init(&guc->ctx_ids);
	intel_guc_log_create(guc);
	guc_addon_create(guc);

	guc->execbuf_client = guc_client_alloc(dev_priv,
					       INTEL_INFO(dev_priv)->ring_mask,
					       GUC_CTX_PRIORITY_KMD_NORMAL,
					       dev_priv->kernel_context);
	if (!guc->execbuf_client) {
		DRM_ERROR("Failed to create GuC client for execbuf!\n");
		goto err;
	}

	return 0;

err:
	i915_guc_submission_fini(dev_priv);
	return -ENOMEM;
}
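
/*
 * A sketch of the expected driver flow (an assumption based on the
 * functions in this file): i915_guc_submission_init() is called once at
 * load time, i915_guc_submission_enable() after the GuC firmware has
 * been loaded, and i915_guc_submission_disable()/_fini() on the way
 * back down.
 */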

static void guc_reset_wq(struct i915_guc_client *client)
{
	struct guc_process_desc *desc = client->vaddr +
					client->proc_desc_offset;

	desc->head = 0;
	desc->tail = 0;

	client->wq_tail = 0;
}

static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int irqs;

	/* tell all command streamers to forward interrupts (but not vblank) to GuC */
	irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MODE_GEN7(engine), irqs);

	/* route USER_INTERRUPT to Host, all others are sent to GuC. */
	irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
	       GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
	/* These three registers have the same bit definitions */
	I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
	I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
	I915_WRITE(GUC_WD_VECS_IER, ~irqs);

	/*
	 * The REDIRECT_TO_GUC bit of the PMINTRMSK register directs all
	 * (unmasked) PM interrupts to the GuC. All other bits of this
	 * register *disable* generation of a specific interrupt.
	 *
	 * 'pm_intrmsk_mbz' indicates bits that are NOT to be set when
	 * writing to the PM interrupt mask register, i.e. interrupts
	 * that must not be disabled.
	 *
	 * If the GuC is handling these interrupts, then we must not let
	 * the PM code disable ANY interrupt that the GuC is expecting.
	 * So for each ENABLED (0) bit in this register, we must SET the
	 * bit in pm_intrmsk_mbz so that it's left enabled for the GuC.
	 * GuC needs ARAT expired interrupt unmasked hence it is set in
	 * pm_intrmsk_mbz.
	 *
	 * Here we CLEAR REDIRECT_TO_GUC bit in pm_intrmsk_mbz, which will
	 * result in the register bit being left SET!
	 */
	dev_priv->rps.pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
	dev_priv->rps.pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
}

int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_guc_client *client = guc->execbuf_client;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (!client)
		return -ENODEV;

	intel_guc_sample_forcewake(guc);

	guc_reset_wq(client);
	guc_init_doorbell_hw(guc);

	/* Take over from manual control of ELSP (execlists) */
	guc_interrupts_capture(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		const int wqi_size = sizeof(struct guc_wq_item);
		struct drm_i915_gem_request *rq;
		/* The tasklet was initialised by execlists, and may be in
		 * a state of flux (across a reset) and so we just want to
		 * take over the callback without changing any other state
		 * in the tasklet.
		 */
		engine->irq_tasklet.func = i915_guc_irq_handler;
		clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);

		/* Replay the current set of previously submitted requests */
		spin_lock_irq(&engine->timeline->lock);
		list_for_each_entry(rq, &engine->timeline->requests, link) {
			guc_client_update_wq_rsvd(client, wqi_size);
			__i915_guc_submit(rq);
		}
		spin_unlock_irq(&engine->timeline->lock);
	}

	return 0;
}

static void guc_interrupts_release(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int irqs;

	/*
	 * tell all command streamers NOT to forward interrupts or vblank
	 * to GuC.
	 */
	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
	irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MODE_GEN7(engine), irqs);

	/* route all GT interrupts to the host */
	I915_WRITE(GUC_BCS_RCS_IER, 0);
	I915_WRITE(GUC_VCS2_VCS1_IER, 0);
	I915_WRITE(GUC_WD_VECS_IER, 0);

	dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
	dev_priv->rps.pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
}

void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;

	guc_interrupts_release(dev_priv);

	if (!guc->execbuf_client)
		return;

	/* Revert back to manual ELSP submission */
	intel_engines_reset_default_submission(dev_priv);
}

void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_guc_client *client;

	client = fetch_and_zero(&guc->execbuf_client);
	if (!client)
		return;

	guc_client_free(dev_priv, client);

	i915_vma_unpin_and_release(&guc->ads_vma);
	i915_vma_unpin_and_release(&guc->log.vma);

	if (guc->ctx_pool_vma)
		ida_destroy(&guc->ctx_ids);
	i915_vma_unpin_and_release(&guc->ctx_pool_vma);
}

/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @dev_priv:	i915 device private
 */
int intel_guc_suspend(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_gem_context *ctx;
	u32 data[3];

	if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
		return 0;

	gen9_disable_guc_interrupts(dev_priv);

	ctx = dev_priv->kernel_context;

	data[0] = INTEL_GUC_ACTION_ENTER_S_STATE;
	/* any value greater than GUC_POWER_D0 */
	data[1] = GUC_POWER_D1;
	/* first page is shared data with GuC */
	data[2] = guc_ggtt_offset(ctx->engine[RCS].state);

	return intel_guc_send(guc, data, ARRAY_SIZE(data));
}


/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @dev_priv:	i915 device private
 */
int intel_guc_resume(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_gem_context *ctx;
	u32 data[3];

	if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
		return 0;

	if (i915.guc_log_level >= 0)
		gen9_enable_guc_interrupts(dev_priv);

	ctx = dev_priv->kernel_context;

	data[0] = INTEL_GUC_ACTION_EXIT_S_STATE;
	data[1] = GUC_POWER_D0;
	/* first page is shared data with GuC */
	data[2] = guc_ggtt_offset(ctx->engine[RCS].state);

	return intel_guc_send(guc, data, ARRAY_SIZE(data));
}