/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <linux/circ_buf.h>
#include "i915_drv.h"
#include "intel_uc.h"

#include <trace/events/dma_fence.h>

/**
 * DOC: GuC-based command submission
 *
 * GuC client:
 * An i915_guc_client refers to a submission path through GuC. Currently,
 * there is only one of these (the execbuf_client) and this one is charged
 * with all submissions to the GuC. This struct is the owner of a doorbell,
 * a process descriptor and a workqueue (all of them inside a single gem
 * object that contains all required pages for these elements).
 *
 * GuC stage descriptor:
 * During initialization, the driver allocates a static pool of 1024 such
 * descriptors, and shares them with the GuC.
 * Currently, there exists a 1:1 mapping between an i915_guc_client and a
 * guc_stage_desc (via the client's stage_id), so effectively only one
 * gets used. This stage descriptor lets the GuC know about the doorbell,
 * workqueue and process descriptor. Theoretically, it also lets the GuC
 * know about our HW contexts (context ID, etc...), but we actually
 * employ a kind of submission where the GuC uses the LRCA sent via the work
 * item instead (the single guc_stage_desc associated to the execbuf client
 * contains information about the default kernel context only, but this is
 * essentially unused). This is called a "proxy" submission.
 *
 * The Scratch registers:
 * There are 16 MMIO-based registers starting from 0xC180. The kernel driver
 * writes a value to the action register (SOFT_SCRATCH_0) along with any
 * data. It then triggers an interrupt on the GuC via another register write
 * (0xC4C8). Firmware writes a success/fail code back to the action register
 * after it processes the request. The kernel driver polls waiting for this
 * update and then proceeds.
 * See intel_guc_send()
 *
 * Doorbells:
 * Doorbells are interrupts to the uKernel. A doorbell is a single cache
 * line (QW) mapped into process space.
 *
 * Work Items:
 * There are several types of work items that the host may place into a
 * workqueue, each with its own requirements and limitations. Currently only
 * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
 * represents an in-order queue. The kernel driver packs the ring tail
 * pointer and an ELSP context descriptor dword into the work item.
 * See guc_wq_item_append()
 *
 * ADS:
 * The Additional Data Struct (ADS) has pointers for different buffers used
 * by the GuC. One single gem object contains the ADS struct itself
 * (guc_ads), the scheduling policies (guc_policies), a structure describing
 * a collection of register sets (guc_mmio_reg_state) and some extra pages
 * for the GuC to save its internal state for sleep.
 */
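
/*
 * For reference: the work item built by guc_wq_item_append() below is four
 * dwords long: a header (type, length and target engine), the low word of
 * the context descriptor, the ring tail (in QWords) and a fence id (the
 * request's global seqno).
 */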

static inline bool is_high_priority(struct i915_guc_client *client)
{
	return client->priority <= GUC_CLIENT_PRIORITY_HIGH;
}

static int __reserve_doorbell(struct i915_guc_client *client)
{
	unsigned long offset;
	unsigned long end;
	u16 id;

	GEM_BUG_ON(client->doorbell_id != GUC_DOORBELL_INVALID);

	/*
	 * The bitmap tracks which doorbell registers are currently in use.
	 * It is split into two halves; the first half is used for normal
	 * priority contexts, the second half for high-priority ones.
	 */
	offset = 0;
	end = GUC_NUM_DOORBELLS/2;
	if (is_high_priority(client)) {
		offset = end;
		end += offset;
	}

	id = find_next_zero_bit(client->guc->doorbell_bitmap, end, offset);
	if (id == end)
		return -ENOSPC;

	__set_bit(id, client->guc->doorbell_bitmap);
	client->doorbell_id = id;
	DRM_DEBUG_DRIVER("client %u (high prio=%s) reserved doorbell: %d\n",
			 client->stage_id, yesno(is_high_priority(client)),
			 id);
	return 0;
}

static void __unreserve_doorbell(struct i915_guc_client *client)
{
	GEM_BUG_ON(client->doorbell_id == GUC_DOORBELL_INVALID);

	__clear_bit(client->doorbell_id, client->guc->doorbell_bitmap);
	client->doorbell_id = GUC_DOORBELL_INVALID;
}

/*
 * Tell the GuC to allocate or deallocate a specific doorbell
 */

static int __guc_allocate_doorbell(struct intel_guc *guc, u32 stage_id)
{
	u32 action[] = {
		INTEL_GUC_ACTION_ALLOCATE_DOORBELL,
		stage_id
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static int __guc_deallocate_doorbell(struct intel_guc *guc, u32 stage_id)
{
	u32 action[] = {
		INTEL_GUC_ACTION_DEALLOCATE_DOORBELL,
		stage_id
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static struct guc_stage_desc *__get_stage_desc(struct i915_guc_client *client)
{
	struct guc_stage_desc *base = client->guc->stage_desc_pool_vaddr;

	return &base[client->stage_id];
}

/*
 * Initialise, update, or clear doorbell data shared with the GuC
 *
 * These functions modify shared data and so need access to the mapped
 * client object which contains the page being used for the doorbell
 */

static void __update_doorbell_desc(struct i915_guc_client *client, u16 new_id)
{
	struct guc_stage_desc *desc;

	/* Update the GuC's idea of the doorbell ID */
	desc = __get_stage_desc(client);
	desc->db_id = new_id;
}

static struct guc_doorbell_info *__get_doorbell(struct i915_guc_client *client)
{
	return client->vaddr + client->doorbell_offset;
}

static bool has_doorbell(struct i915_guc_client *client)
{
	if (client->doorbell_id == GUC_DOORBELL_INVALID)
		return false;

	return test_bit(client->doorbell_id, client->guc->doorbell_bitmap);
}

static int __create_doorbell(struct i915_guc_client *client)
{
	struct guc_doorbell_info *doorbell;
	int err;

	doorbell = __get_doorbell(client);
	doorbell->db_status = GUC_DOORBELL_ENABLED;
	doorbell->cookie = client->doorbell_cookie;

	err = __guc_allocate_doorbell(client->guc, client->stage_id);
	if (err) {
		doorbell->db_status = GUC_DOORBELL_DISABLED;
		doorbell->cookie = 0;
	}
	return err;
}

static int __destroy_doorbell(struct i915_guc_client *client)
{
	struct drm_i915_private *dev_priv = guc_to_i915(client->guc);
	struct guc_doorbell_info *doorbell;
	u16 db_id = client->doorbell_id;

	GEM_BUG_ON(db_id >= GUC_DOORBELL_INVALID);

	doorbell = __get_doorbell(client);
	doorbell->db_status = GUC_DOORBELL_DISABLED;
	doorbell->cookie = 0;

	/* Doorbell release flow requires that we wait for GEN8_DRB_VALID bit
	 * to go to zero after updating db_status before we call the GuC to
	 * release the doorbell */
	if (wait_for_us(!(I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID), 10))
		WARN_ONCE(true, "Doorbell never became invalid after disable\n");

	return __guc_deallocate_doorbell(client->guc, client->stage_id);
}

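/*
 * Reserve a doorbell ID, point the stage descriptor at it and ask the GuC
 * to install the doorbell; on failure, the reservation is unwound again.
 */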
static int create_doorbell(struct i915_guc_client *client)
{
	int ret;

	ret = __reserve_doorbell(client);
	if (ret)
		return ret;

	__update_doorbell_desc(client, client->doorbell_id);

	ret = __create_doorbell(client);
	if (ret)
		goto err;

	return 0;

err:
	__update_doorbell_desc(client, GUC_DOORBELL_INVALID);
	__unreserve_doorbell(client);
	return ret;
}

static int destroy_doorbell(struct i915_guc_client *client)
{
	int err;

	GEM_BUG_ON(!has_doorbell(client));

	/* XXX: wait for any interrupts */
	/* XXX: wait for workqueue to drain */

	err = __destroy_doorbell(client);
	if (err)
		return err;

	__update_doorbell_desc(client, GUC_DOORBELL_INVALID);

	__unreserve_doorbell(client);

	return 0;
}

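/*
 * Pick the cacheline (within the doorbell page) that this client will use;
 * a per-GuC counter staggers successive clients across cachelines.
 */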
static unsigned long __select_cacheline(struct intel_guc *guc)
{
	unsigned long offset;

	/* Doorbell uses a single cache line within a page */
	offset = offset_in_page(guc->db_cacheline);

	/* Moving to next cache line to reduce contention */
	guc->db_cacheline += cache_line_size();

	DRM_DEBUG_DRIVER("reserved cacheline 0x%lx, next 0x%x, linesize %u\n",
			 offset, guc->db_cacheline, cache_line_size());
	return offset;
}

/*
 * Initialise the process descriptor shared with the GuC firmware.
 */
static void guc_proc_desc_init(struct intel_guc *guc,
			       struct i915_guc_client *client)
{
	struct guc_process_desc *desc;

	desc = client->vaddr + client->proc_desc_offset;

	memset(desc, 0, sizeof(*desc));

	/*
	 * XXX: pDoorbell and WQVBaseAddress are pointers in process address
	 * space for ring3 clients (set them as in mmap_ioctl) or kernel
	 * space for kernel clients (map on demand instead? May make debug
	 * easier to have it mapped).
	 */
	desc->wq_base_addr = 0;
	desc->db_base_addr = 0;

	desc->stage_id = client->stage_id;
	desc->wq_size_bytes = client->wq_size;
	desc->wq_status = WQ_STATUS_ACTIVE;
	desc->priority = client->priority;
}

/*
 * Initialise/clear the stage descriptor shared with the GuC firmware.
 *
 * This descriptor tells the GuC where (in GGTT space) to find the important
 * data structures relating to this client (doorbell, process descriptor,
 * write queue, etc).
 */
static void guc_stage_desc_init(struct intel_guc *guc,
				struct i915_guc_client *client)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx = client->owner;
	struct guc_stage_desc *desc;
	unsigned int tmp;
	u32 gfx_addr;

	desc = __get_stage_desc(client);
	memset(desc, 0, sizeof(*desc));

	desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE | GUC_STAGE_DESC_ATTR_KERNEL;
	desc->stage_id = client->stage_id;
	desc->priority = client->priority;
	desc->db_id = client->doorbell_id;

	for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
		struct intel_context *ce = &ctx->engine[engine->id];
		uint32_t guc_engine_id = engine->guc_id;
		struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id];

		/* TODO: We have a design issue to be solved here. Only when we
		 * receive the first batch do we know which engine is used by
		 * the user, but here the GuC expects the lrc and ring to be
		 * pinned already. This is not an issue for the default
		 * context, which is currently the only owner of a GuC client,
		 * but any future owner must make sure the lrc is pinned
		 * before getting here.
		 */
		if (!ce->state)
			break;	/* XXX: continue? */

		/*
		 * XXX: When this is a GUC_STAGE_DESC_ATTR_KERNEL client (proxy
		 * submission or, in other words, not using a direct submission
		 * model) the KMD's LRCA is not used for any work submission.
		 * Instead, the GuC uses the LRCA of the user mode context (see
		 * guc_wq_item_append below).
		 */
		lrc->context_desc = lower_32_bits(ce->lrc_desc);

		/* The state page is after PPHWSP */
		lrc->ring_lrca =
			guc_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;

		/* XXX: In direct submission, the GuC wants the HW context id
		 * here. In proxy submission, it wants the stage id */
		lrc->context_id = (client->stage_id << GUC_ELC_CTXID_OFFSET) |
				(guc_engine_id << GUC_ELC_ENGINE_OFFSET);

		lrc->ring_begin = guc_ggtt_offset(ce->ring->vma);
		lrc->ring_end = lrc->ring_begin + ce->ring->size - 1;
		lrc->ring_next_free_location = lrc->ring_begin;
		lrc->ring_current_tail_pointer_value = 0;

		desc->engines_used |= (1 << guc_engine_id);
	}

	DRM_DEBUG_DRIVER("Host engines 0x%x => GuC engines used 0x%x\n",
			 client->engines, desc->engines_used);
	WARN_ON(desc->engines_used == 0);

	/*
	 * The doorbell, process descriptor, and workqueue are all parts
	 * of the client object, which the GuC will reference via the GGTT
	 */
	gfx_addr = guc_ggtt_offset(client->vma);
	desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
				client->doorbell_offset;
	desc->db_trigger_cpu = (uintptr_t)__get_doorbell(client);
	desc->db_trigger_uk = gfx_addr + client->doorbell_offset;
	desc->process_desc = gfx_addr + client->proc_desc_offset;
	desc->wq_addr = gfx_addr + client->wq_offset;
	desc->wq_size = client->wq_size;

	desc->desc_private = (uintptr_t)client;
}

static void guc_stage_desc_fini(struct intel_guc *guc,
				struct i915_guc_client *client)
{
	struct guc_stage_desc *desc;

	desc = __get_stage_desc(client);
	memset(desc, 0, sizeof(*desc));
}

/**
 * i915_guc_wq_reserve() - reserve space in the GuC's workqueue
 * @request:	request associated with the commands
 *
 * Return:	0 if space is available
 *		-EAGAIN if space is not currently available
 *
 * This function must be called (and must return 0) before a request
 * is submitted to the GuC via i915_guc_submit() below. Once a result
 * of 0 has been returned, it must be balanced by a corresponding
 * call to submit().
 *
 * Reservation allows the caller to determine in advance that space
 * will be available for the next submission before committing resources
 * to it, and helps avoid late failures with complicated recovery paths.
 */
int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
{
	const size_t wqi_size = sizeof(struct guc_wq_item);
	struct i915_guc_client *client = request->i915->guc.execbuf_client;
	struct guc_process_desc *desc = client->vaddr +
					client->proc_desc_offset;
	u32 freespace;
	int ret;

	spin_lock_irq(&client->wq_lock);
	freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size);
	freespace -= client->wq_rsvd;
	if (likely(freespace >= wqi_size)) {
		client->wq_rsvd += wqi_size;
		ret = 0;
	} else {
		client->no_wq_space++;
		ret = -EAGAIN;
	}
	spin_unlock_irq(&client->wq_lock);

	return ret;
}

static void guc_client_update_wq_rsvd(struct i915_guc_client *client, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&client->wq_lock, flags);
	client->wq_rsvd += size;
	spin_unlock_irqrestore(&client->wq_lock, flags);
}

void i915_guc_wq_unreserve(struct drm_i915_gem_request *request)
{
	const int wqi_size = sizeof(struct guc_wq_item);
	struct i915_guc_client *client = request->i915->guc.execbuf_client;

	GEM_BUG_ON(READ_ONCE(client->wq_rsvd) < wqi_size);
	guc_client_update_wq_rsvd(client, -wqi_size);
}

/* Construct a Work Item and append it to the GuC's Work Queue */
static void guc_wq_item_append(struct i915_guc_client *client,
			       struct drm_i915_gem_request *rq)
{
	/* wqi_len is in DWords, and does not include the one-word header */
	const size_t wqi_size = sizeof(struct guc_wq_item);
	const u32 wqi_len = wqi_size / sizeof(u32) - 1;
	struct intel_engine_cs *engine = rq->engine;
	struct guc_process_desc *desc;
	struct guc_wq_item *wqi;
	u32 freespace, tail, wq_off;

	desc = client->vaddr + client->proc_desc_offset;

	/* Free space is guaranteed, see i915_guc_wq_reserve() above */
	freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size);
	GEM_BUG_ON(freespace < wqi_size);

	/* The GuC firmware wants the tail index in QWords, not bytes */
	tail = rq->tail;
	GEM_BUG_ON(tail & 7);
	tail >>= 3;
	GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);

	/* For now a workqueue item is 4 DWs and the workqueue buffer is
	 * 2 pages, so a wqi can never cross a page boundary or wrap to the
	 * beginning of the buffer. This simplifies the implementation below.
	 *
	 * XXX: if that ever changes, we would need to build the item in a
	 * temporary wqi and copy it into the workqueue buffer dw by dw.
	 */
	BUILD_BUG_ON(wqi_size != 16);
	GEM_BUG_ON(client->wq_rsvd < wqi_size);

	/* postincrement WQ tail for next time */
	wq_off = client->wq_tail;
	GEM_BUG_ON(wq_off & (wqi_size - 1));
	client->wq_tail += wqi_size;
	client->wq_tail &= client->wq_size - 1;
	client->wq_rsvd -= wqi_size;

	/* WQ starts from the page after doorbell / process_desc */
	wqi = client->vaddr + wq_off + GUC_DB_SIZE;

	/* Now fill in the 4-word work queue item */
	wqi->header = WQ_TYPE_INORDER |
			(wqi_len << WQ_LEN_SHIFT) |
			(engine->guc_id << WQ_TARGET_SHIFT) |
			WQ_NO_WCFLUSH_WAIT;

	/* The GuC wants only the low-order word of the context descriptor */
	wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, engine);

	wqi->submit_element_info = tail << WQ_RING_TAIL_SHIFT;
	wqi->fence_id = rq->global_seqno;
}

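/*
 * Reset the shared workqueue to empty: both the head/tail that the GuC
 * sees in the process descriptor and our own cached tail.
 */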
static void guc_reset_wq(struct i915_guc_client *client)
{
	struct guc_process_desc *desc = client->vaddr +
					client->proc_desc_offset;

	desc->head = 0;
	desc->tail = 0;

	client->wq_tail = 0;
}

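/*
 * Ring the doorbell by atomically replacing the cookie in its cacheline.
 * If the cmpxchg fails because the GuC has updated the cookie under us,
 * retry once with the freshly read value; if the doorbell has been
 * disabled meanwhile, give up.
 */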
static int guc_ring_doorbell(struct i915_guc_client *client)
{
	struct guc_process_desc *desc;
	union guc_doorbell_qw db_cmp, db_exc, db_ret;
	union guc_doorbell_qw *db;
	int attempt = 2, ret = -EAGAIN;

	desc = client->vaddr + client->proc_desc_offset;

	/* Update the tail so it is visible to GuC */
	desc->tail = client->wq_tail;

	/* current cookie */
	db_cmp.db_status = GUC_DOORBELL_ENABLED;
	db_cmp.cookie = client->doorbell_cookie;

	/* cookie to be updated */
	db_exc.db_status = GUC_DOORBELL_ENABLED;
	db_exc.cookie = client->doorbell_cookie + 1;
	if (db_exc.cookie == 0)
		db_exc.cookie = 1;

	/* pointer to the current doorbell cacheline */
	db = (union guc_doorbell_qw *)__get_doorbell(client);

	while (attempt--) {
		/* let's ring the doorbell */
		db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db,
			db_cmp.value_qw, db_exc.value_qw);

		/* if the exchange was successfully executed */
		if (db_ret.value_qw == db_cmp.value_qw) {
			/* db was successfully rung */
			client->doorbell_cookie = db_exc.cookie;
			ret = 0;
			break;
		}

		/* XXX: doorbell was lost and need to acquire it again */
		if (db_ret.db_status == GUC_DOORBELL_DISABLED)
			break;

		DRM_WARN("Cookie mismatch. Expected %d, found %d\n",
			 db_cmp.cookie, db_ret.cookie);

		/* update the cookie to the newly read cookie from the GuC */
		db_cmp.cookie = db_ret.cookie;
		db_exc.cookie = db_ret.cookie + 1;
		if (db_exc.cookie == 0)
			db_exc.cookie = 1;
	}

	return ret;
}

/**
 * __i915_guc_submit() - Submit commands through GuC
 * @rq:		request associated with the commands
 *
 * The caller must have already called i915_guc_wq_reserve() above with
 * a result of 0 (success), guaranteeing that there is space in the work
 * queue for the new request, so enqueuing the item cannot fail.
 *
 * Bad Things Will Happen if the caller violates this protocol e.g. calls
 * submit() when _reserve() says there's no space, or calls _submit()
 * a different number of times from (successful) calls to _reserve().
 *
 * The only error here arises if the doorbell hardware isn't functioning
 * as expected, which really shouldn't happen.
 */
static void __i915_guc_submit(struct drm_i915_gem_request *rq)
{
	struct drm_i915_private *dev_priv = rq->i915;
	struct intel_engine_cs *engine = rq->engine;
	unsigned int engine_id = engine->id;
	struct intel_guc *guc = &rq->i915->guc;
	struct i915_guc_client *client = guc->execbuf_client;
	unsigned long flags;
	int b_ret;

	/* WA to flush out the pending GMADR writes to ring buffer. */
	if (i915_vma_is_map_and_fenceable(rq->ring->vma))
		POSTING_READ_FW(GUC_STATUS);

	spin_lock_irqsave(&client->wq_lock, flags);

	guc_wq_item_append(client, rq);
	b_ret = guc_ring_doorbell(client);

	client->submissions[engine_id] += 1;
	client->retcode = b_ret;
	if (b_ret)
		client->b_fail += 1;

	guc->submissions[engine_id] += 1;
	guc->last_seqno[engine_id] = rq->global_seqno;

	spin_unlock_irqrestore(&client->wq_lock, flags);
}

static void i915_guc_submit(struct drm_i915_gem_request *rq)
{
	__i915_gem_request_submit(rq);
	__i915_guc_submit(rq);
}

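/*
 * Enable signaling on a request being submitted from inside the tasklet;
 * the nested lock annotation below exists purely to placate lockdep.
 */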
static void nested_enable_signaling(struct drm_i915_gem_request *rq)
{
	/* If we use dma_fence_enable_sw_signaling() directly, lockdep
	 * detects an ordering issue between the fence lockclass and the
	 * global_timeline. This circular dependency can only occur via 2
	 * different fences (but same fence lockclass), so we use the nesting
	 * annotation here to prevent the warn, equivalent to the nesting
	 * inside i915_gem_request_submit() for when we also enable the
	 * signaler.
	 */

	if (test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
			     &rq->fence.flags))
		return;

	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags));
	trace_dma_fence_enable_signal(&rq->fence);

	spin_lock_nested(&rq->lock, SINGLE_DEPTH_NESTING);
	intel_engine_enable_signaling(rq);
	spin_unlock(&rq->lock);
}

static bool i915_guc_dequeue(struct intel_engine_cs *engine)
{
	struct execlist_port *port = engine->execlist_port;
	struct drm_i915_gem_request *last = port[0].request;
	struct rb_node *rb;
	bool submit = false;

	/* After execlist_first is updated, the tasklet will be rescheduled.
	 *
	 * If we are currently running (inside the tasklet) and a third
	 * party queues a request and so updates engine->execlist_first under
	 * the spinlock (which we have elided), it will atomically set the
	 * TASKLET_SCHED flag causing the us to be re-executed and pick up
	 * the change in state (the update to TASKLET_SCHED incurs a memory
	 * barrier making this cross-cpu checking safe).
	 */
	if (!READ_ONCE(engine->execlist_first))
		return false;

	spin_lock_irq(&engine->timeline->lock);
	rb = engine->execlist_first;
	while (rb) {
		struct drm_i915_gem_request *rq =
			rb_entry(rb, typeof(*rq), priotree.node);

		if (last && rq->ctx != last->ctx) {
			if (port != engine->execlist_port)
				break;

			i915_gem_request_assign(&port->request, last);
			nested_enable_signaling(last);
			port++;
		}

		rb = rb_next(rb);
		rb_erase(&rq->priotree.node, &engine->execlist_queue);
		RB_CLEAR_NODE(&rq->priotree.node);
		rq->priotree.priority = INT_MAX;

		i915_guc_submit(rq);
		trace_i915_gem_request_in(rq, port - engine->execlist_port);
		last = rq;
		submit = true;
	}
	if (submit) {
		i915_gem_request_assign(&port->request, last);
		nested_enable_signaling(last);
		engine->execlist_first = rb;
	}
	spin_unlock_irq(&engine->timeline->lock);

	return submit;
}

static void i915_guc_irq_handler(unsigned long data)
{
	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
	struct execlist_port *port = engine->execlist_port;
	struct drm_i915_gem_request *rq;
	bool submit;

	do {
		rq = port[0].request;
		while (rq && i915_gem_request_completed(rq)) {
			trace_i915_gem_request_out(rq);
			i915_gem_request_put(rq);
			port[0].request = port[1].request;
			port[1].request = NULL;
			rq = port[0].request;
		}

		submit = false;
		if (!port[1].request)
			submit = i915_guc_dequeue(engine);
	} while (submit);
}

/*
 * Everything below here is concerned with setup & teardown, and is
 * therefore not part of the somewhat time-critical batch-submission
 * path of i915_guc_submit() above.
 */

/**
 * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
 * @guc:	the guc
 * @size:	size of area to allocate (both virtual space and memory)
 *
 * This is a wrapper to create an object for use with the GuC. In order to
 * use it inside the GuC, an object needs to be pinned for its lifetime, so
 * we allocate both some backing storage and a range inside the Global GTT.
 * We must pin it in the GGTT somewhere other than [0, GUC_WOPCM_TOP)
 * because that range is reserved inside GuC.
 *
 * Return:	An i915_vma if successful, otherwise an ERR_PTR.
 */
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	obj = i915_gem_object_create(dev_priv, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
	if (IS_ERR(vma))
		goto err;

	ret = i915_vma_pin(vma, 0, PAGE_SIZE,
			   PIN_GLOBAL | PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	return vma;

err:
	i915_gem_object_put(obj);
	return vma;
}

/* Check that a doorbell register is in the expected state */
static bool doorbell_ok(struct intel_guc *guc, u16 db_id)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 drbregl;
	bool valid;

	GEM_BUG_ON(db_id >= GUC_DOORBELL_INVALID);

	drbregl = I915_READ(GEN8_DRBREGL(db_id));
	valid = drbregl & GEN8_DRB_VALID;

	if (test_bit(db_id, guc->doorbell_bitmap) == valid)
		return true;

	DRM_DEBUG_DRIVER("Doorbell %d has unexpected state (0x%x): valid=%s\n",
			 db_id, drbregl, yesno(valid));

	return false;
}

/*
 * If the GuC thinks that the doorbell is unassigned (e.g. because we reset and
 * reloaded the GuC FW) we can use this function to tell the GuC to reassign the
 * doorbell to the rightful owner.
 */
static int __reset_doorbell(struct i915_guc_client *client, u16 db_id)
{
	int err;

	__update_doorbell_desc(client, db_id);
	err = __create_doorbell(client);
	if (!err)
		err = __destroy_doorbell(client);

	return err;
}

/*
 * Set up & tear down each unused doorbell in turn, to ensure that all doorbell
 * HW is (re)initialised. For that end, we might have to borrow the first
 * client. Also, tell GuC about all the doorbells in use by all clients.
 * We do this because the KMD, the GuC and the doorbell HW can easily go out of
 * sync (e.g. we can reset the GuC, but not the doorbell HW).
 */
static int guc_init_doorbell_hw(struct intel_guc *guc)
{
	struct i915_guc_client *client = guc->execbuf_client;
	bool recreate_first_client = false;
	u16 db_id;
	int ret;

	/* For unused doorbells, make sure they are disabled */
	for_each_clear_bit(db_id, guc->doorbell_bitmap, GUC_NUM_DOORBELLS) {
		if (doorbell_ok(guc, db_id))
			continue;

		if (has_doorbell(client)) {
			/* Borrow execbuf_client (we will recreate it later) */
			destroy_doorbell(client);
			recreate_first_client = true;
		}

		ret = __reset_doorbell(client, db_id);
		WARN(ret, "Doorbell %u reset failed, err %d\n", db_id, ret);
	}

	if (recreate_first_client) {
		ret = __reserve_doorbell(client);
		if (unlikely(ret)) {
			DRM_ERROR("Couldn't re-reserve first client db: %d\n", ret);
			return ret;
		}

		__update_doorbell_desc(client, client->doorbell_id);
	}

	/* Now for every client (and not only execbuf_client) make sure their
	 * doorbells are known by the GuC */
	//for (client = client_list; client != NULL; client = client->next)
	{
		ret = __create_doorbell(client);
		if (ret) {
			DRM_ERROR("Couldn't recreate client %u doorbell: %d\n",
				  client->stage_id, ret);
			return ret;
		}
	}

	/* Read back & verify all (used & unused) doorbell registers */
	for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id)
		WARN_ON(!doorbell_ok(guc, db_id));

	return 0;
}

/**
 * guc_client_alloc() - Allocate an i915_guc_client
 * @dev_priv:	driver private data structure
 * @engines:	The set of engines to enable for this client
 * @priority:	four levels priority _CRITICAL, _HIGH, _NORMAL and _LOW
 * 		The kernel client to replace ExecList submission is created with
 * 		NORMAL priority. Priority of a client for scheduler can be HIGH,
 * 		while a preemption context can use CRITICAL.
 * @ctx:	the context that owns the client (we use the default render
 * 		context)
 *
 * Return:	An i915_guc_client object if success, else an error pointer.
 */
static struct i915_guc_client *
guc_client_alloc(struct drm_i915_private *dev_priv,
		 uint32_t engines,
		 uint32_t priority,
		 struct i915_gem_context *ctx)
{
	struct i915_guc_client *client;
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_vma *vma;
	void *vaddr;
	int ret;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->guc = guc;
	client->owner = ctx;
	client->engines = engines;
	client->priority = priority;
	client->doorbell_id = GUC_DOORBELL_INVALID;
	client->wq_offset = GUC_DB_SIZE;
	client->wq_size = GUC_WQ_SIZE;
	spin_lock_init(&client->wq_lock);

	ret = ida_simple_get(&guc->stage_ids, 0, GUC_MAX_STAGE_DESCRIPTORS,
				GFP_KERNEL);
	if (ret < 0)
		goto err_client;

	client->stage_id = ret;

	/* The first page is doorbell/proc_desc. The two pages that follow
	 * are the wq. */
	vma = intel_guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_id;
	}

	/* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
	client->vma = vma;

	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto err_vma;
	}
	client->vaddr = vaddr;

	client->doorbell_offset = __select_cacheline(guc);

	/*
	 * Since the doorbell only requires a single cacheline, we can save
	 * space by putting the application process descriptor in the same
	 * page. Use the half of the page that doesn't include the doorbell.
	 */
	if (client->doorbell_offset >= (GUC_DB_SIZE / 2))
		client->proc_desc_offset = 0;
	else
		client->proc_desc_offset = (GUC_DB_SIZE / 2);

	guc_proc_desc_init(guc, client);
	guc_stage_desc_init(guc, client);

	ret = create_doorbell(client);
	if (ret)
		goto err_vaddr;

	DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: stage_id %u\n",
			 priority, client, client->engines, client->stage_id);
	DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n",
			 client->doorbell_id, client->doorbell_offset);

	return client;

err_vaddr:
	i915_gem_object_unpin_map(client->vma->obj);
err_vma:
	i915_vma_unpin_and_release(&client->vma);
err_id:
	ida_simple_remove(&guc->stage_ids, client->stage_id);
err_client:
	kfree(client);
	return ERR_PTR(ret);
}

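/* Tear down everything that guc_client_alloc() set up, in reverse order. */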
static void guc_client_free(struct i915_guc_client *client)
{
	/*
	 * XXX: wait for any outstanding submissions before freeing memory.
	 * Be sure to drop any locks
	 */

	/* FIXME: in many cases, by the time we get here the GuC has been
	 * reset, so we cannot destroy the doorbell properly. Ignore the
	 * error message for now */
	destroy_doorbell(client);
	guc_stage_desc_fini(client->guc, client);
	i915_gem_object_unpin_map(client->vma->obj);
	i915_vma_unpin_and_release(&client->vma);
	ida_simple_remove(&client->guc->stage_ids, client->stage_id);
	kfree(client);
}

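/*
 * Fill in the default scheduling policies (execution quanta, preemption
 * and fault times) for every priority level on every engine.
 */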
static void guc_policies_init(struct guc_policies *policies)
{
	struct guc_policy *policy;
	u32 p, i;

	policies->dpc_promote_time = 500000;
	policies->max_num_work_items = POLICY_MAX_NUM_WI;

	for (p = 0; p < GUC_CLIENT_PRIORITY_NUM; p++) {
		for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
			policy = &policies->policy[p][i];

			policy->execution_quantum = 1000000;
			policy->preemption_time = 500000;
			policy->fault_time = 250000;
			policy->policy_flags = 0;
		}
	}

	policies->is_valid = 1;
}

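/*
 * Allocate and populate the Additional Data Struct, which tells the GuC
 * where to find its scheduling policies, the MMIO register state and the
 * golden context it uses to reinitialise engines after a reset.
 */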
static int guc_ads_create(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct i915_vma *vma;
	struct page *page;
	/* The ads obj includes the struct itself and buffers passed to GuC */
	struct {
		struct guc_ads ads;
		struct guc_policies policies;
		struct guc_mmio_reg_state reg_state;
		u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE];
	} __packed *blob;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 base;

	GEM_BUG_ON(guc->ads_vma);

	vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(sizeof(*blob)));
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	guc->ads_vma = vma;

	page = i915_vma_first_page(vma);
	blob = kmap(page);

	/* GuC scheduling policies */
	guc_policies_init(&blob->policies);

	/* MMIO reg state */
	for_each_engine(engine, dev_priv, id) {
		blob->reg_state.mmio_white_list[engine->guc_id].mmio_start =
			engine->mmio_base + GUC_MMIO_WHITE_LIST_START;

		/* Nothing to be saved or restored for now. */
		blob->reg_state.mmio_white_list[engine->guc_id].count = 0;
	}

	/*
	 * The GuC requires a "Golden Context" when it reinitialises
	 * engines after a reset. Here we use the Render ring default
	 * context, which must already exist and be pinned in the GGTT,
	 * so its address won't change after we've told the GuC where
	 * to find it.
	 */
	blob->ads.golden_context_lrca =
		dev_priv->engine[RCS]->status_page.ggtt_offset;

	for_each_engine(engine, dev_priv, id)
		blob->ads.eng_state_size[engine->guc_id] =
			intel_lr_context_size(engine);

	base = guc_ggtt_offset(vma);
	blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
	blob->ads.reg_state_buffer = base + ptr_offset(blob, reg_state_buffer);
	blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state);

	kunmap(page);

	return 0;
}

static void guc_ads_destroy(struct intel_guc *guc)
{
	i915_vma_unpin_and_release(&guc->ads_vma);
}

/*
 * Set up the memory resources to be shared with the GuC (via the GGTT)
 * at firmware loading time.
 */
int i915_guc_submission_init(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_vma *vma;
	void *vaddr;
	int ret;

	if (guc->stage_desc_pool)
		return 0;

	vma = intel_guc_allocate_vma(guc,
				PAGE_ALIGN(sizeof(struct guc_stage_desc) *
					GUC_MAX_STAGE_DESCRIPTORS));
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	guc->stage_desc_pool = vma;

	vaddr = i915_gem_object_pin_map(guc->stage_desc_pool->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto err_vma;
	}

	guc->stage_desc_pool_vaddr = vaddr;

	ret = intel_guc_log_create(guc);
	if (ret < 0)
		goto err_vaddr;

	ret = guc_ads_create(guc);
	if (ret < 0)
		goto err_log;

	ida_init(&guc->stage_ids);

	return 0;

err_log:
	intel_guc_log_destroy(guc);
err_vaddr:
	i915_gem_object_unpin_map(guc->stage_desc_pool->obj);
err_vma:
	i915_vma_unpin_and_release(&guc->stage_desc_pool);
	return ret;
}

void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;

	ida_destroy(&guc->stage_ids);
	guc_ads_destroy(guc);
	intel_guc_log_destroy(guc);
	i915_gem_object_unpin_map(guc->stage_desc_pool->obj);
	i915_vma_unpin_and_release(&guc->stage_desc_pool);
}

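/*
 * Route command-streamer and (unmasked) PM interrupts to the GuC, which
 * needs them to drive its scheduler; only user interrupts stay with the
 * host.
 */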
static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int irqs;

	/* tell all command streamers to forward interrupts (but not vblank) to GuC */
	irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MODE_GEN7(engine), irqs);

	/* route USER_INTERRUPT to Host, all others are sent to GuC. */
	irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
	       GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
	/* These three registers have the same bit definitions */
	I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
	I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
	I915_WRITE(GUC_WD_VECS_IER, ~irqs);

	/*
	 * The REDIRECT_TO_GUC bit of the PMINTRMSK register directs all
	 * (unmasked) PM interrupts to the GuC. All other bits of this
	 * register *disable* generation of a specific interrupt.
	 *
	 * 'pm_intrmsk_mbz' indicates bits that are NOT to be set when
	 * writing to the PM interrupt mask register, i.e. interrupts
	 * that must not be disabled.
	 *
	 * If the GuC is handling these interrupts, then we must not let
	 * the PM code disable ANY interrupt that the GuC is expecting.
	 * So for each ENABLED (0) bit in this register, we must SET the
	 * bit in pm_intrmsk_mbz so that it's left enabled for the GuC.
	 * GuC needs ARAT expired interrupt unmasked hence it is set in
	 * pm_intrmsk_mbz.
	 *
	 * Here we CLEAR REDIRECT_TO_GUC bit in pm_intrmsk_mbz, which will
	 * result in the register bit being left SET!
	 */
	dev_priv->rps.pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
	dev_priv->rps.pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
}

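/*
 * Enable GuC submission: create the execbuf client if needed, sanitize the
 * doorbell state, redirect interrupts to the GuC, take over each engine's
 * tasklet and replay any requests that were already submitted.
 */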
int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_guc_client *client = guc->execbuf_client;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	if (!client) {
		client = guc_client_alloc(dev_priv,
					  INTEL_INFO(dev_priv)->ring_mask,
					  GUC_CLIENT_PRIORITY_KMD_NORMAL,
					  dev_priv->kernel_context);
		if (IS_ERR(client)) {
			DRM_ERROR("Failed to create GuC client for execbuf!\n");
			return PTR_ERR(client);
		}

		guc->execbuf_client = client;
	}

	err = intel_guc_sample_forcewake(guc);
	if (err)
		goto err_execbuf_client;

	guc_reset_wq(client);

	err = guc_init_doorbell_hw(guc);
	if (err)
		goto err_execbuf_client;

	/* Take over from manual control of ELSP (execlists) */
	guc_interrupts_capture(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		const int wqi_size = sizeof(struct guc_wq_item);
		struct drm_i915_gem_request *rq;

		/* The tasklet was initialised by execlists, and may be in
		 * a state of flux (across a reset) and so we just want to
		 * take over the callback without changing any other state
		 * in the tasklet.
		 */
		engine->irq_tasklet.func = i915_guc_irq_handler;
		clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);

		/* Replay the current set of previously submitted requests */
		spin_lock_irq(&engine->timeline->lock);
		list_for_each_entry(rq, &engine->timeline->requests, link) {
			guc_client_update_wq_rsvd(client, wqi_size);
			__i915_guc_submit(rq);
		}
		spin_unlock_irq(&engine->timeline->lock);
	}

	return 0;

err_execbuf_client:
	guc_client_free(guc->execbuf_client);
	guc->execbuf_client = NULL;
	return err;
}

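/* Undo guc_interrupts_capture(): route all GT interrupts back to the host. */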
static void guc_interrupts_release(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int irqs;

	/*
	 * tell all command streamers NOT to forward interrupts or vblank
	 * to GuC.
	 */
	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
	irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MODE_GEN7(engine), irqs);

	/* route all GT interrupts to the host */
	I915_WRITE(GUC_BCS_RCS_IER, 0);
	I915_WRITE(GUC_VCS2_VCS1_IER, 0);
	I915_WRITE(GUC_WD_VECS_IER, 0);

	dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
	dev_priv->rps.pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
}

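/*
 * Disable GuC submission: restore the interrupt routing, hand the engines
 * back to the execlists backend and release the execbuf client.
 */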
void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;

	guc_interrupts_release(dev_priv);

	/* Revert back to manual ELSP submission */
	intel_engines_reset_default_submission(dev_priv);

	guc_client_free(guc->execbuf_client);
	guc->execbuf_client = NULL;
}

/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @dev_priv:	i915 device private
 */
int intel_guc_suspend(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_gem_context *ctx;
	u32 data[3];

	if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
		return 0;

	gen9_disable_guc_interrupts(dev_priv);

	ctx = dev_priv->kernel_context;

	data[0] = INTEL_GUC_ACTION_ENTER_S_STATE;
	/* any value greater than GUC_POWER_D0 */
	data[1] = GUC_POWER_D1;
	/* first page is shared data with GuC */
	data[2] = guc_ggtt_offset(ctx->engine[RCS].state);

	return intel_guc_send(guc, data, ARRAY_SIZE(data));
}

/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @dev_priv:	i915 device private
 */
int intel_guc_resume(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_gem_context *ctx;
	u32 data[3];

	if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
		return 0;

	if (i915.guc_log_level >= 0)
		gen9_enable_guc_interrupts(dev_priv);

	ctx = dev_priv->kernel_context;

	data[0] = INTEL_GUC_ACTION_EXIT_S_STATE;
	data[1] = GUC_POWER_D0;
	/* first page is shared data with GuC */
	data[2] = guc_ggtt_offset(ctx->engine[RCS].state);

	return intel_guc_send(guc, data, ARRAY_SIZE(data));
}