/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <linux/circ_buf.h>
#include <linux/debugfs.h>
#include <linux/relay.h>
#include "i915_drv.h"
#include "intel_uc.h"

/**
 * DOC: GuC-based command submission
 *
 * i915_guc_client:
 * We use the term client to avoid confusion with contexts. An i915_guc_client
 * is equivalent to the GuC object guc_context_desc. This context descriptor
 * is allocated from a pool of 1024 entries. The kernel driver allocates a
 * doorbell and a workqueue for the client, as well as the process descriptor
 * (guc_process_desc), which is mapped to client space so that the client can
 * write a Work Item and then ring the doorbell.
 *
 * To simplify the implementation, we allocate one gem object that contains all
 * pages for the doorbell, process descriptor and workqueue.
 *
 * The Scratch registers:
 * There are 16 MMIO-based registers starting from 0xC180. The kernel driver
 * writes a value to the action register (SOFT_SCRATCH_0) along with any data.
 * It then triggers an interrupt on the GuC via another register write (0xC4C8).
 * Firmware writes a success/fail code back to the action register after
 * processing the request. The kernel driver polls waiting for this update and
 * then proceeds.
 * See intel_guc_send()
 *
 * Doorbells:
 * Doorbells are interrupts to the uKernel. A doorbell is a single cache line
 * (QW) mapped into process space.
 *
 * Work Items:
 * There are several types of work items that the host may place into a
 * workqueue, each with its own requirements and limitations. Currently only
 * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
 * represents an in-order queue. The kernel driver packs the ring tail pointer
 * and an ELSP context descriptor dword into the Work Item.
 * See guc_wq_item_append()
 *
 */
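
/*
 * For illustration, the scratch-register protocol above reduces to roughly
 * the following (a simplified sketch of intel_guc_send(), which lives in
 * intel_uc.c; register names are assumed from i915_guc_reg.h, and locking,
 * timeouts and error handling are elided):
 *
 *	for (i = 0; i < len; i++)
 *		I915_WRITE(SOFT_SCRATCH(i), action[i]);
 *
 *	I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
 *
 *	then poll SOFT_SCRATCH(0) until the firmware writes back its
 *	success/fail response code, and return that code to the caller.
 */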

/*
 * Tell the GuC to allocate or deallocate a specific doorbell
 */

static int guc_allocate_doorbell(struct intel_guc *guc,
				 struct i915_guc_client *client)
{
	u32 action[] = {
		INTEL_GUC_ACTION_ALLOCATE_DOORBELL,
		client->ctx_index
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static int guc_release_doorbell(struct intel_guc *guc,
				struct i915_guc_client *client)
{
	u32 action[] = {
		INTEL_GUC_ACTION_DEALLOCATE_DOORBELL,
		client->ctx_index
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/*
 * Initialise, update, or clear doorbell data shared with the GuC
 *
 * These functions modify shared data and so need access to the mapped
 * client object which contains the page being used for the doorbell
 */

static int guc_update_doorbell_id(struct intel_guc *guc,
				  struct i915_guc_client *client,
				  u16 new_id)
{
	struct sg_table *sg = guc->ctx_pool_vma->pages;
	void *doorbell_bitmap = guc->doorbell_bitmap;
	struct guc_doorbell_info *doorbell;
	struct guc_context_desc desc;
	size_t len;

	doorbell = client->vaddr + client->doorbell_offset;

	if (client->doorbell_id != GUC_INVALID_DOORBELL_ID &&
	    test_bit(client->doorbell_id, doorbell_bitmap)) {
		/* Deactivate the old doorbell */
		doorbell->db_status = GUC_DOORBELL_DISABLED;
		(void)guc_release_doorbell(guc, client);
		__clear_bit(client->doorbell_id, doorbell_bitmap);
	}

	/* Update the GuC's idea of the doorbell ID */
	len = sg_pcopy_to_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
				 sizeof(desc) * client->ctx_index);
	if (len != sizeof(desc))
		return -EFAULT;
	desc.db_id = new_id;
	len = sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
				   sizeof(desc) * client->ctx_index);
	if (len != sizeof(desc))
		return -EFAULT;

	client->doorbell_id = new_id;
	if (new_id == GUC_INVALID_DOORBELL_ID)
		return 0;

	/* Activate the new doorbell */
	__set_bit(new_id, doorbell_bitmap);
	doorbell->db_status = GUC_DOORBELL_ENABLED;
	doorbell->cookie = client->doorbell_cookie;
	return guc_allocate_doorbell(guc, client);
}

static void guc_disable_doorbell(struct intel_guc *guc,
				 struct i915_guc_client *client)
{
	(void)guc_update_doorbell_id(guc, client, GUC_INVALID_DOORBELL_ID);

	/* XXX: wait for any interrupts */
	/* XXX: wait for workqueue to drain */
}

static uint16_t
select_doorbell_register(struct intel_guc *guc, uint32_t priority)
{
	/*
	 * The bitmap tracks which doorbell registers are currently in use.
	 * It is split into two halves; the first half is used for normal
	 * priority contexts, the second half for high-priority ones.
	 * Note that logically higher priorities are numerically less than
	 * normal ones, so the test below means "is it high-priority?"
	 */
	const bool hi_pri = (priority <= GUC_CTX_PRIORITY_HIGH);
	const uint16_t half = GUC_MAX_DOORBELLS / 2;
	const uint16_t start = hi_pri ? half : 0;
	const uint16_t end = start + half;
	uint16_t id;

	id = find_next_zero_bit(guc->doorbell_bitmap, end, start);
	if (id == end)
		id = GUC_INVALID_DOORBELL_ID;

	DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n",
			hi_pri ? "high" : "normal", id);

	return id;
}

/*
 * Select, assign and release doorbell cachelines
 *
 * These functions track which doorbell cachelines are in use.
 * The data they manipulate is protected by the intel_guc_send lock.
 */

static uint32_t select_doorbell_cacheline(struct intel_guc *guc)
{
	const uint32_t cacheline_size = cache_line_size();
	uint32_t offset;

	/* Doorbell uses a single cache line within a page */
	offset = offset_in_page(guc->db_cacheline);

	/* Moving to next cache line to reduce contention */
	guc->db_cacheline += cacheline_size;

	DRM_DEBUG_DRIVER("selected doorbell cacheline 0x%x, next 0x%x, linesize %u\n",
			offset, guc->db_cacheline, cacheline_size);

	return offset;
}
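
/*
 * Example: with 64-byte cachelines, successive calls return offsets 0x000,
 * 0x040, 0x080, ... 0xfc0 and then wrap back to 0x000, since
 * offset_in_page() masks off everything above the 4 KiB page.
 */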

/*
 * Initialise the process descriptor shared with the GuC firmware.
 */
static void guc_proc_desc_init(struct intel_guc *guc,
			       struct i915_guc_client *client)
{
	struct guc_process_desc *desc;

	desc = client->vaddr + client->proc_desc_offset;

	memset(desc, 0, sizeof(*desc));

	/*
	 * XXX: pDoorbell and WQVBaseAddress are pointers in process address
	 * space for ring3 clients (set them as in mmap_ioctl) or kernel
	 * space for kernel clients (map on demand instead? May make debug
	 * easier to have it mapped).
	 */
	desc->wq_base_addr = 0;
	desc->db_base_addr = 0;

	desc->context_id = client->ctx_index;
	desc->wq_size_bytes = client->wq_size;
	desc->wq_status = WQ_STATUS_ACTIVE;
	desc->priority = client->priority;
}

/*
 * Initialise/clear the context descriptor shared with the GuC firmware.
 *
 * This descriptor tells the GuC where (in GGTT space) to find the important
 * data structures relating to this client (doorbell, process descriptor,
 * write queue, etc).
 */

static void guc_ctx_desc_init(struct intel_guc *guc,
			      struct i915_guc_client *client)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx = client->owner;
	struct guc_context_desc desc;
	struct sg_table *sg;
	unsigned int tmp;
	u32 gfx_addr;

	memset(&desc, 0, sizeof(desc));

	desc.attribute = GUC_CTX_DESC_ATTR_ACTIVE | GUC_CTX_DESC_ATTR_KERNEL;
	desc.context_id = client->ctx_index;
	desc.priority = client->priority;
	desc.db_id = client->doorbell_id;

	for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
		struct intel_context *ce = &ctx->engine[engine->id];
		uint32_t guc_engine_id = engine->guc_id;
		struct guc_execlist_context *lrc = &desc.lrc[guc_engine_id];

		/* TODO: We have a design issue to be solved here. Only when we
		 * receive the first batch, we know which engine is used by the
		 * user. But here GuC expects the lrc and ring to be pinned. It
		 * is not an issue for the default context, which is the only
		 * one that owns a GuC client for now. But any future owner of
		 * a GuC client needs to make sure the lrc is pinned prior to
		 * entering here.
		 */
		if (!ce->state)
			break;	/* XXX: continue? */

		lrc->context_desc = lower_32_bits(ce->lrc_desc);

		/* The state page is after PPHWSP */
		lrc->ring_lcra =
			i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
		lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
				(guc_engine_id << GUC_ELC_ENGINE_OFFSET);

		lrc->ring_begin = i915_ggtt_offset(ce->ring->vma);
		lrc->ring_end = lrc->ring_begin + ce->ring->size - 1;
		lrc->ring_next_free_location = lrc->ring_begin;
		lrc->ring_current_tail_pointer_value = 0;

		desc.engines_used |= (1 << guc_engine_id);
	}

	DRM_DEBUG_DRIVER("Host engines 0x%x => GuC engines used 0x%x\n",
			client->engines, desc.engines_used);
	WARN_ON(desc.engines_used == 0);

	/*
	 * The doorbell, process descriptor, and workqueue are all parts
	 * of the client object, which the GuC will reference via the GGTT
	 */
	gfx_addr = i915_ggtt_offset(client->vma);
	desc.db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
				client->doorbell_offset;
	desc.db_trigger_cpu =
		(uintptr_t)client->vaddr + client->doorbell_offset;
	desc.db_trigger_uk = gfx_addr + client->doorbell_offset;
	desc.process_desc = gfx_addr + client->proc_desc_offset;
	desc.wq_addr = gfx_addr + client->wq_offset;
	desc.wq_size = client->wq_size;

	/*
	 * XXX: Take LRCs from an existing context if this is not an
	 * IsKMDCreatedContext client
	 */
	desc.desc_private = (uintptr_t)client;

	/* Pool context is pinned already */
	sg = guc->ctx_pool_vma->pages;
	sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
			     sizeof(desc) * client->ctx_index);
}

static void guc_ctx_desc_fini(struct intel_guc *guc,
			      struct i915_guc_client *client)
{
	struct guc_context_desc desc;
	struct sg_table *sg;

	memset(&desc, 0, sizeof(desc));

	sg = guc->ctx_pool_vma->pages;
	sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
			     sizeof(desc) * client->ctx_index);
}
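
/*
 * Note that "fini" here just writes a zeroed descriptor over the client's
 * slot in the shared pool; attribute 0 means GUC_CTX_DESC_ATTR_ACTIVE is
 * clear, so the GuC treats the slot as unused from then on.
 */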

/**
 * i915_guc_wq_reserve() - reserve space in the GuC's workqueue
 * @request:	request associated with the commands
 *
 * Return:	0 if space is available
 *		-EAGAIN if space is not currently available
 *
 * This function must be called (and must return 0) before a request
 * is submitted to the GuC via i915_guc_submit() below. Once a result
 * of 0 has been returned, it must be balanced by a corresponding
 * call to submit().
 *
 * Reservation allows the caller to determine in advance that space
 * will be available for the next submission before committing resources
 * to it, and helps avoid late failures with complicated recovery paths.
 */
int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
{
	const size_t wqi_size = sizeof(struct guc_wq_item);
	struct i915_guc_client *gc = request->i915->guc.execbuf_client;
	struct guc_process_desc *desc = gc->vaddr + gc->proc_desc_offset;
	u32 freespace;
	int ret;

	spin_lock(&gc->wq_lock);
	freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
	freespace -= gc->wq_rsvd;
	if (likely(freespace >= wqi_size)) {
		gc->wq_rsvd += wqi_size;
		ret = 0;
	} else {
		gc->no_wq_space++;
		ret = -EAGAIN;
	}
	spin_unlock(&gc->wq_lock);

	return ret;
}

void i915_guc_wq_unreserve(struct drm_i915_gem_request *request)
{
	const size_t wqi_size = sizeof(struct guc_wq_item);
	struct i915_guc_client *gc = request->i915->guc.execbuf_client;

	GEM_BUG_ON(READ_ONCE(gc->wq_rsvd) < wqi_size);

	spin_lock(&gc->wq_lock);
	gc->wq_rsvd -= wqi_size;
	spin_unlock(&gc->wq_lock);
}
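
/*
 * Taken together, reserve/submit/unreserve give the usual pattern (a sketch
 * only; in the driver the reservation is actually taken when the request is
 * allocated, well before submission):
 *
 *	ret = i915_guc_wq_reserve(request);
 *	if (ret)
 *		return ret;	(-EAGAIN: no space right now)
 *	...
 *	i915_guc_submit(request);	(consumes the reservation)
 *
 * with i915_guc_wq_unreserve() handing the space back if the request is
 * never submitted.
 */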

/* Construct a Work Item and append it to the GuC's Work Queue */
static void guc_wq_item_append(struct i915_guc_client *gc,
			       struct drm_i915_gem_request *rq)
{
	/* wqi_len is in DWords, and does not include the one-word header */
	const size_t wqi_size = sizeof(struct guc_wq_item);
	const u32 wqi_len = wqi_size/sizeof(u32) - 1;
	struct intel_engine_cs *engine = rq->engine;
	struct guc_process_desc *desc;
	struct guc_wq_item *wqi;
	u32 freespace, tail, wq_off;

	desc = gc->vaddr + gc->proc_desc_offset;

	/* Free space is guaranteed, see i915_guc_wq_reserve() above */
	freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
	GEM_BUG_ON(freespace < wqi_size);

	/* The GuC firmware wants the tail index in QWords, not bytes */
	tail = rq->tail;
	GEM_BUG_ON(tail & 7);
	tail >>= 3;
	GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);

	/* For now the workqueue item is 4 DWs and the workqueue buffer is
	 * 2 pages, so a wqi can neither straddle a page boundary nor wrap
	 * around to the beginning. This simplifies the implementation below.
	 *
	 * XXX: if that ever changes, we would need to build the wqi in a
	 * temporary buffer and copy it into the workqueue dword by dword.
	 */
	BUILD_BUG_ON(wqi_size != 16);
	GEM_BUG_ON(gc->wq_rsvd < wqi_size);

	/* postincrement WQ tail for next time */
	wq_off = gc->wq_tail;
	GEM_BUG_ON(wq_off & (wqi_size - 1));
	gc->wq_tail += wqi_size;
	gc->wq_tail &= gc->wq_size - 1;
	gc->wq_rsvd -= wqi_size;

	/* WQ starts from the page after doorbell / process_desc */
	wqi = gc->vaddr + wq_off + GUC_DB_SIZE;

	/* Now fill in the 4-word work queue item */
	wqi->header = WQ_TYPE_INORDER |
			(wqi_len << WQ_LEN_SHIFT) |
			(engine->guc_id << WQ_TARGET_SHIFT) |
			WQ_NO_WCFLUSH_WAIT;

	/* The GuC wants only the low-order word of the context descriptor */
	wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, engine);

	wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
	wqi->fence_id = rq->global_seqno;
}
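
/*
 * For reference, the four words written above reach the firmware in this
 * layout (see struct guc_wq_item in intel_guc_fwif.h):
 *
 *	header:       WQ_TYPE_INORDER | item length | target engine | flags
 *	context_desc: low 32 bits of the ELSP context descriptor
 *	ring_tail:    new ring tail, in QWords
 *	fence_id:     global seqno of the request
 */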

static int guc_ring_doorbell(struct i915_guc_client *gc)
{
	struct guc_process_desc *desc;
	union guc_doorbell_qw db_cmp, db_exc, db_ret;
	union guc_doorbell_qw *db;
	int attempt = 2, ret = -EAGAIN;

	desc = gc->vaddr + gc->proc_desc_offset;

	/* Update the tail so it is visible to GuC */
	desc->tail = gc->wq_tail;

	/* current cookie */
	db_cmp.db_status = GUC_DOORBELL_ENABLED;
	db_cmp.cookie = gc->doorbell_cookie;

	/* cookie to be updated */
	db_exc.db_status = GUC_DOORBELL_ENABLED;
	db_exc.cookie = gc->doorbell_cookie + 1;
	if (db_exc.cookie == 0)
		db_exc.cookie = 1;

	/* pointer of current doorbell cacheline */
	db = gc->vaddr + gc->doorbell_offset;

	while (attempt--) {
		/* lets ring the doorbell */
		db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db,
			db_cmp.value_qw, db_exc.value_qw);

		/* if the exchange was successfully executed */
		if (db_ret.value_qw == db_cmp.value_qw) {
			/* db was successfully rung */
			gc->doorbell_cookie = db_exc.cookie;
			ret = 0;
			break;
		}

		/* XXX: doorbell was lost and need to acquire it again */
		if (db_ret.db_status == GUC_DOORBELL_DISABLED)
			break;

		DRM_WARN("Cookie mismatch. Expected %d, found %d\n",
			 db_cmp.cookie, db_ret.cookie);

		/* update the cookie to newly read cookie from GuC */
		db_cmp.cookie = db_ret.cookie;
		db_exc.cookie = db_ret.cookie + 1;
		if (db_exc.cookie == 0)
			db_exc.cookie = 1;
	}

	return ret;
}
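
/*
 * A note on the cmpxchg protocol above: the doorbell cacheline holds a
 * {db_status, cookie} pair packed into a single quadword (union
 * guc_doorbell_qw). Ringing the doorbell atomically bumps the cookie while
 * the status stays GUC_DOORBELL_ENABLED; that write to the cacheline is
 * what the GuC observes as the "interrupt". If the exchange fails with a
 * different cookie, the GuC has updated the doorbell behind our back, so
 * we retry against the freshly observed value.
 */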

/**
 * __i915_guc_submit() - Submit commands through GuC
 * @rq:		request associated with the commands
 *
 * The caller must have already called i915_guc_wq_reserve() above with
 * a result of 0 (success), guaranteeing that there is space in the work
 * queue for the new request, so enqueuing the item cannot fail.
 *
 * Bad Things Will Happen if the caller violates this protocol e.g. calls
 * submit() when _reserve() says there's no space, or calls _submit()
 * a different number of times from (successful) calls to _reserve().
 *
 * The only error here arises if the doorbell hardware isn't functioning
 * as expected, which really shouldn't happen.
 */
static void __i915_guc_submit(struct drm_i915_gem_request *rq)
{
	struct drm_i915_private *dev_priv = rq->i915;
	struct intel_engine_cs *engine = rq->engine;
	unsigned int engine_id = engine->id;
	struct intel_guc *guc = &rq->i915->guc;
	struct i915_guc_client *client = guc->execbuf_client;
	int b_ret;

	spin_lock(&client->wq_lock);
	guc_wq_item_append(client, rq);

	/* WA to flush out the pending GMADR writes to ring buffer. */
	if (i915_vma_is_map_and_fenceable(rq->ring->vma))
		POSTING_READ_FW(GUC_STATUS);

	b_ret = guc_ring_doorbell(client);

	client->submissions[engine_id] += 1;
	client->retcode = b_ret;
	if (b_ret)
		client->b_fail += 1;

	guc->submissions[engine_id] += 1;
	guc->last_seqno[engine_id] = rq->global_seqno;
	spin_unlock(&client->wq_lock);
}

static void i915_guc_submit(struct drm_i915_gem_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;

	/* We keep the previous context alive until we retire the following
	 * request. This ensures that the context object is still pinned
	 * for any residual writes the HW makes into it on the context switch
	 * into the next object following the breadcrumb. Otherwise, we may
	 * retire the context too early.
	 */
	rq->previous_context = engine->last_context;
	engine->last_context = rq->ctx;

	i915_gem_request_submit(rq);
	__i915_guc_submit(rq);
}
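
/*
 * i915_guc_submit() is not called directly: it is installed as
 * engine->submit_request by i915_guc_submission_enable() below, displacing
 * the execlists submission path.
 */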

/*
 * Everything below here is concerned with setup & teardown, and is
 * therefore not part of the somewhat time-critical batch-submission
 * path of i915_guc_submit() above.
 */

/**
 * guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
 * @guc:	the guc
 * @size:	size of area to allocate (both virtual space and memory)
 *
 * This is a wrapper to create an object for use with the GuC. In order to
 * use it inside the GuC, an object needs to be pinned for its lifetime, so
 * we allocate both some backing storage and a range inside the Global GTT.
 * We must pin it in the GGTT somewhere other than [0, GUC_WOPCM_TOP) because
 * that range is reserved inside GuC.
 *
 * Return:	An i915_vma if successful, otherwise an ERR_PTR.
 */
static struct i915_vma *guc_allocate_vma(struct intel_guc *guc, u32 size)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	obj = i915_gem_object_create(&dev_priv->drm, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
	if (IS_ERR(vma))
		goto err;

	ret = i915_vma_pin(vma, 0, PAGE_SIZE,
			   PIN_GLOBAL | PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	/* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);

	return vma;

err:
	i915_gem_object_put(obj);
	return vma;
}

static void
guc_client_free(struct drm_i915_private *dev_priv,
		struct i915_guc_client *client)
{
	struct intel_guc *guc = &dev_priv->guc;

	if (!client)
		return;

	/*
	 * XXX: wait for any outstanding submissions before freeing memory.
	 * Be sure to drop any locks
	 */

	if (client->vaddr) {
		/*
		 * If we got as far as setting up a doorbell, make sure we
		 * shut it down before unmapping & deallocating the memory.
		 */
		guc_disable_doorbell(guc, client);

		i915_gem_object_unpin_map(client->vma->obj);
	}

	i915_vma_unpin_and_release(&client->vma);

	if (client->ctx_index != GUC_INVALID_CTX_ID) {
		guc_ctx_desc_fini(guc, client);
		ida_simple_remove(&guc->ctx_ids, client->ctx_index);
	}

	kfree(client);
}

/* Check that a doorbell register is in the expected state */
static bool guc_doorbell_check(struct intel_guc *guc, uint16_t db_id)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	i915_reg_t drbreg = GEN8_DRBREGL(db_id);
	uint32_t value = I915_READ(drbreg);
	bool enabled = (value & GUC_DOORBELL_ENABLED) != 0;
	bool expected = test_bit(db_id, guc->doorbell_bitmap);

	if (enabled == expected)
		return true;

	DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) 0x%x, should be %s\n",
			 db_id, drbreg.reg, value,
			 expected ? "active" : "inactive");

	return false;
}

/*
 * Borrow the first client to set up & tear down each unused doorbell
 * in turn, to ensure that all doorbell h/w is (re)initialised.
 */
static void guc_init_doorbell_hw(struct intel_guc *guc)
{
	struct i915_guc_client *client = guc->execbuf_client;
	uint16_t db_id;
	int i, err;

	guc_disable_doorbell(guc, client);

	for (i = 0; i < GUC_MAX_DOORBELLS; ++i) {
		/* Skip if doorbell is OK */
		if (guc_doorbell_check(guc, i))
			continue;

		err = guc_update_doorbell_id(guc, client, i);
		if (err)
			DRM_DEBUG_DRIVER("Doorbell %d update failed, err %d\n",
					i, err);
	}

	db_id = select_doorbell_register(guc, client->priority);
	WARN_ON(db_id == GUC_INVALID_DOORBELL_ID);

	err = guc_update_doorbell_id(guc, client, db_id);
	if (err)
		DRM_WARN("Failed to restore doorbell to %d, err %d\n",
			 db_id, err);

	/* Read back & verify all doorbell registers */
	for (i = 0; i < GUC_MAX_DOORBELLS; ++i)
		(void)guc_doorbell_check(guc, i);
}

/**
 * guc_client_alloc() - Allocate an i915_guc_client
 * @dev_priv:	driver private data structure
 * @engines:	The set of engines to enable for this client
 * @priority:	four levels priority _CRITICAL, _HIGH, _NORMAL and _LOW
 * 		The kernel client to replace ExecList submission is created with
 * 		NORMAL priority. Priority of a client for scheduler can be HIGH,
 * 		while a preemption context can use CRITICAL.
 * @ctx:	the context that owns the client (we use the default render
 * 		context)
 *
 * Return:	An i915_guc_client object if success, else NULL.
 */
static struct i915_guc_client *
guc_client_alloc(struct drm_i915_private *dev_priv,
		 uint32_t engines,
		 uint32_t priority,
		 struct i915_gem_context *ctx)
{
	struct i915_guc_client *client;
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_vma *vma;
	void *vaddr;
	uint16_t db_id;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return NULL;

	client->owner = ctx;
	client->guc = guc;
	client->engines = engines;
	client->priority = priority;
	client->doorbell_id = GUC_INVALID_DOORBELL_ID;

	client->ctx_index = (uint32_t)ida_simple_get(&guc->ctx_ids, 0,
			GUC_MAX_GPU_CONTEXTS, GFP_KERNEL);
	if (client->ctx_index >= GUC_MAX_GPU_CONTEXTS) {
		client->ctx_index = GUC_INVALID_CTX_ID;
		goto err;
	}

	/* The first page is doorbell/proc_desc. The two following pages are wq. */
	vma = guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
	if (IS_ERR(vma))
		goto err;

	/* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
	client->vma = vma;

	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		goto err;

	client->vaddr = vaddr;

	spin_lock_init(&client->wq_lock);
	client->wq_offset = GUC_DB_SIZE;
	client->wq_size = GUC_WQ_SIZE;

	db_id = select_doorbell_register(guc, client->priority);
	if (db_id == GUC_INVALID_DOORBELL_ID)
		/* XXX: evict a doorbell instead? */
		goto err;

	client->doorbell_offset = select_doorbell_cacheline(guc);

	/*
	 * Since the doorbell only requires a single cacheline, we can save
	 * space by putting the application process descriptor in the same
	 * page. Use the half of the page that doesn't include the doorbell.
	 */
	if (client->doorbell_offset >= (GUC_DB_SIZE / 2))
		client->proc_desc_offset = 0;
	else
		client->proc_desc_offset = (GUC_DB_SIZE / 2);

	guc_proc_desc_init(guc, client);
	guc_ctx_desc_init(guc, client);

	/* For runtime client allocation we need to enable the doorbell. Not
	 * required yet for the static execbuf_client as this special kernel
	 * client is enabled from i915_guc_submission_enable().
	 *
	 * guc_update_doorbell_id(guc, client, db_id);
	 */

	DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: ctx_index %u\n",
		priority, client, client->engines, client->ctx_index);
	DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%x\n",
		client->doorbell_id, client->doorbell_offset);

	return client;

err:
	guc_client_free(dev_priv, client);
	return NULL;
}

/*
 * Sub buffer switch callback. Called whenever relay has to switch to a new
 * sub buffer, relay stays on the same sub buffer if 0 is returned.
 */
static int subbuf_start_callback(struct rchan_buf *buf,
				 void *subbuf,
				 void *prev_subbuf,
				 size_t prev_padding)
{
	/* Use no-overwrite mode by default, where relay will stop accepting
	 * new data if there are no empty sub buffers left.
	 * There is no strict synchronization enforced by relay between Consumer
	 * and Producer. In overwrite mode, there is a possibility of getting
	 * inconsistent/garbled data, the producer could be writing on to the
	 * same sub buffer from which Consumer is reading. This can't be avoided
	 * unless Consumer is fast enough and can always run in tandem with
	 * Producer.
	 */
	if (relay_buf_full(buf))
		return 0;

	return 1;
}

/*
 * file_create() callback. Creates relay file in debugfs.
 */
static struct dentry *create_buf_file_callback(const char *filename,
					       struct dentry *parent,
					       umode_t mode,
					       struct rchan_buf *buf,
					       int *is_global)
{
	struct dentry *buf_file;

	/* This is to enable the use of a single buffer for the relay channel
	 * and correspondingly have a single file exposed to User, through
	 * which it can collect the logs in order without any post-processing.
	 * Need to set 'is_global' even if parent is NULL for early logging.
	 */
	*is_global = 1;

	if (!parent)
		return NULL;

	/* Not using the channel filename passed as an argument, since for each
	 * channel relay appends the corresponding CPU number to the filename
	 * passed in relay_open(). This should be fine as relay just needs a
	 * dentry of the file associated with the channel buffer and that file's
	 * name need not be same as the filename passed as an argument.
	 */
	buf_file = debugfs_create_file("guc_log", mode,
				       parent, buf, &relay_file_operations);
	return buf_file;
}
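
/*
 * With the file created above, userspace can collect the firmware log by
 * reading the single relay file, e.g. (assuming the default debugfs mount
 * point and DRM minor 0):
 *
 *	cat /sys/kernel/debug/dri/0/guc_log > guc_log.bin
 */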

/*
 * file_remove() default callback. Removes relay file in debugfs.
 */
static int remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

/* relay channel callbacks */
static struct rchan_callbacks relay_callbacks = {
	.subbuf_start = subbuf_start_callback,
	.create_buf_file = create_buf_file_callback,
	.remove_buf_file = remove_buf_file_callback,
};

static void guc_log_remove_relay_file(struct intel_guc *guc)
{
	relay_close(guc->log.relay_chan);
}

static int guc_log_create_relay_channel(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct rchan *guc_log_relay_chan;
	size_t n_subbufs, subbuf_size;

	/* Keep the size of sub buffers same as shared log buffer */
	subbuf_size = guc->log.vma->obj->base.size;

	/* Store up to 8 snapshots, which is large enough to buffer sufficient
	 * boot time logs and provides enough leeway to User, in terms of
	 * latency, for consuming the logs from relay. Also doesn't take
	 * up too much memory.
	 */
	n_subbufs = 8;

	guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size,
					n_subbufs, &relay_callbacks, dev_priv);
	if (!guc_log_relay_chan) {
		DRM_ERROR("Couldn't create relay chan for GuC logging\n");
		return -ENOMEM;
	}

	GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
	guc->log.relay_chan = guc_log_relay_chan;
	return 0;
}

static int guc_log_create_relay_file(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct dentry *log_dir;
	int ret;

	/* For now create the log file in /sys/kernel/debug/dri/0 dir */
	log_dir = dev_priv->drm.primary->debugfs_root;

	/* If /sys/kernel/debug/dri/0 location do not exist, then debugfs is
	 * not mounted and so can't create the relay file.
	 * The relay API seems to fit well with debugfs only, for availing relay
	 * there are 3 requirements which can be met for debugfs file only in a
	 * straightforward/clean manner :-
	 * i)   Need the associated dentry pointer of the file, while opening the
	 *      relay channel.
	 * ii)  Should be able to use 'relay_file_operations' fops for the file.
	 * iii) Set the 'i_private' field of file's inode to the pointer of
	 *	relay channel buffer.
	 */
	if (!log_dir) {
		DRM_ERROR("Debugfs dir not available yet for GuC log file\n");
		return -ENODEV;
	}

	ret = relay_late_setup_files(guc->log.relay_chan, "guc_log", log_dir);
	if (ret) {
		DRM_ERROR("Couldn't associate relay chan with file %d\n", ret);
		return ret;
	}

	return 0;
}

static void guc_move_to_next_buf(struct intel_guc *guc)
{
	/* Make sure the updates made in the sub buffer are visible when
	 * Consumer sees the following update to offset inside the sub buffer.
	 */
	smp_wmb();

	/* All data has been written, so now move the offset of sub buffer. */
	relay_reserve(guc->log.relay_chan, guc->log.vma->obj->base.size);

	/* Switch to the next sub buffer */
	relay_flush(guc->log.relay_chan);
}

static void *guc_get_write_buffer(struct intel_guc *guc)
{
	if (!guc->log.relay_chan)
		return NULL;

	/* Just get the base address of a new sub buffer and copy data into it
	 * ourselves. NULL will be returned in no-overwrite mode, if all sub
	 * buffers are full. Could have used the relay_write() to indirectly
	 * copy the data, but that would have been bit convoluted, as we need to
	 * write to only certain locations inside a sub buffer which cannot be
	 * done without using relay_reserve() along with relay_write(). So it's
	 * better to use relay_reserve() alone.
	 */
	return relay_reserve(guc->log.relay_chan, 0);
}

static bool
guc_check_log_buf_overflow(struct intel_guc *guc,
			   enum guc_log_buffer_type type, unsigned int full_cnt)
{
	unsigned int prev_full_cnt = guc->log.prev_overflow_count[type];
	bool overflow = false;

	if (full_cnt != prev_full_cnt) {
		overflow = true;

		guc->log.prev_overflow_count[type] = full_cnt;
		guc->log.total_overflow_count[type] += full_cnt - prev_full_cnt;

		if (full_cnt < prev_full_cnt) {
			/* buffer_full_cnt is a 4 bit counter */
			guc->log.total_overflow_count[type] += 16;
		}
		DRM_ERROR_RATELIMITED("GuC log buffer overflow\n");
	}

	return overflow;
}
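
/*
 * Example of the 4-bit wraparound handling above: if buffer_full_cnt was 15
 * at the previous flush and now reads back as 2, the unsigned difference
 * (2 - 15) plus 16 credits exactly the 3 genuine overflows that occurred
 * in between.
 */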

static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
{
	switch (type) {
	case GUC_ISR_LOG_BUFFER:
		return (GUC_LOG_ISR_PAGES + 1) * PAGE_SIZE;
	case GUC_DPC_LOG_BUFFER:
		return (GUC_LOG_DPC_PAGES + 1) * PAGE_SIZE;
	case GUC_CRASH_DUMP_LOG_BUFFER:
		return (GUC_LOG_CRASH_PAGES + 1) * PAGE_SIZE;
	default:
		MISSING_CASE(type);
	}

	return 0;
}

static void guc_read_update_log_buffer(struct intel_guc *guc)
{
	unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
	struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
	struct guc_log_buffer_state log_buf_state_local;
	enum guc_log_buffer_type type;
	void *src_data, *dst_data;
	bool new_overflow;

	if (WARN_ON(!guc->log.buf_addr))
		return;

	/* Get the pointer to shared GuC log buffer */
	log_buf_state = src_data = guc->log.buf_addr;

	/* Get the pointer to local buffer to store the logs */
	log_buf_snapshot_state = dst_data = guc_get_write_buffer(guc);

	/* Actual logs are present from the 2nd page */
	src_data += PAGE_SIZE;
	dst_data += PAGE_SIZE;

	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
		/* Make a copy of the state structure, inside GuC log buffer
		 * (which is uncached mapped), on the stack to avoid reading
		 * from it multiple times.
		 */
		memcpy(&log_buf_state_local, log_buf_state,
		       sizeof(struct guc_log_buffer_state));
		buffer_size = guc_get_log_buffer_size(type);
		read_offset = log_buf_state_local.read_ptr;
		write_offset = log_buf_state_local.sampled_write_ptr;
		full_cnt = log_buf_state_local.buffer_full_cnt;

		/* Bookkeeping stuff */
		guc->log.flush_count[type] += log_buf_state_local.flush_to_file;
		new_overflow = guc_check_log_buf_overflow(guc, type, full_cnt);

		/* Update the state of shared log buffer */
		log_buf_state->read_ptr = write_offset;
		log_buf_state->flush_to_file = 0;
		log_buf_state++;

		if (unlikely(!log_buf_snapshot_state))
			continue;

		/* First copy the state structure in snapshot buffer */
		memcpy(log_buf_snapshot_state, &log_buf_state_local,
		       sizeof(struct guc_log_buffer_state));

		/* The write pointer could have been updated by GuC firmware,
		 * after sending the flush interrupt to Host, for consistency
		 * set write pointer value to same value of sampled_write_ptr
		 * in the snapshot buffer.
		 */
		log_buf_snapshot_state->write_ptr = write_offset;
		log_buf_snapshot_state++;

		/* Now copy the actual logs. */
		if (unlikely(new_overflow)) {
			/* copy the whole buffer in case of overflow */
			read_offset = 0;
			write_offset = buffer_size;
		} else if (unlikely((read_offset > buffer_size) ||
				    (write_offset > buffer_size))) {
			DRM_ERROR("invalid log buffer state\n");
			/* copy whole buffer as offsets are unreliable */
			read_offset = 0;
			write_offset = buffer_size;
		}

		/* Just copy the newly written data */
		if (read_offset > write_offset) {
			i915_memcpy_from_wc(dst_data, src_data, write_offset);
			bytes_to_copy = buffer_size - read_offset;
		} else {
			bytes_to_copy = write_offset - read_offset;
		}
		i915_memcpy_from_wc(dst_data + read_offset,
				    src_data + read_offset, bytes_to_copy);

		src_data += buffer_size;
		dst_data += buffer_size;
	}

	if (log_buf_snapshot_state)
		guc_move_to_next_buf(guc);
	else {
		/* Rate limited to avoid a deluge of messages; the logs might
		 * be getting consumed by User at a slow rate.
		 */
		DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
		guc->log.capture_miss_count++;
	}
}

static void guc_capture_logs_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, guc.log.flush_work);

	i915_guc_capture_logs(dev_priv);
}

static void guc_log_cleanup(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	/* First disable the flush interrupt */
	gen9_disable_guc_interrupts(dev_priv);

	if (guc->log.flush_wq)
		destroy_workqueue(guc->log.flush_wq);

	guc->log.flush_wq = NULL;

	if (guc->log.relay_chan)
		guc_log_remove_relay_file(guc);

	guc->log.relay_chan = NULL;

	if (guc->log.buf_addr)
		i915_gem_object_unpin_map(guc->log.vma->obj);

	guc->log.buf_addr = NULL;
}

static int guc_log_create_extras(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	void *vaddr;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	/* Nothing to do */
	if (i915.guc_log_level < 0)
		return 0;

	if (!guc->log.buf_addr) {
		/* Create a WC (Uncached for read) vmalloc mapping of log
		 * buffer pages, so that we can directly get the data
		 * (up-to-date) from memory.
		 */
		vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC);
		if (IS_ERR(vaddr)) {
			ret = PTR_ERR(vaddr);
			DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
			return ret;
		}

		guc->log.buf_addr = vaddr;
	}

	if (!guc->log.relay_chan) {
		/* Create a relay channel, so that we have buffers for storing
		 * the GuC firmware logs, the channel will be linked with a file
		 * later on when debugfs is registered.
		 */
		ret = guc_log_create_relay_channel(guc);
		if (ret)
			return ret;
	}

	if (!guc->log.flush_wq) {
		INIT_WORK(&guc->log.flush_work, guc_capture_logs_work);

		/*
		 * GuC log buffer flush work item has to do register access to
		 * send the ack to GuC and this work item, if not synced before
		 * suspend, can potentially get executed after the GFX device is
		 * suspended.
		 * By marking the WQ as freezable, we don't have to bother about
		 * flushing of this work item from the suspend hooks, the pending
		 * work item if any will be either executed before the suspend
		 * or scheduled later on resume. This way the handling of work
		 * item can be kept same between system suspend & rpm suspend.
		 */
		guc->log.flush_wq = alloc_ordered_workqueue("i915-guc_log",
							    WQ_HIGHPRI | WQ_FREEZABLE);
		if (guc->log.flush_wq == NULL) {
			DRM_ERROR("Couldn't allocate the wq for GuC logging\n");
			return -ENOMEM;
		}
	}

	return 0;
}

static void guc_log_create(struct intel_guc *guc)
{
	struct i915_vma *vma;
	unsigned long offset;
	uint32_t size, flags;

	if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
		i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;

	/* The first page is to save log buffer state. Allocate one
	 * extra page for others in case of overlap */
	size = (1 + GUC_LOG_DPC_PAGES + 1 +
		GUC_LOG_ISR_PAGES + 1 +
		GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;

	vma = guc->log.vma;
	if (!vma) {
		/* We require SSE 4.1 for fast reads from the GuC log buffer and
		 * it should be present on the chipsets supporting GuC based
		 * submissions.
		 */
		if (WARN_ON(!i915_memcpy_from_wc(NULL, NULL, 0))) {
			/* logging will not be enabled */
			i915.guc_log_level = -1;
			return;
		}

		vma = guc_allocate_vma(guc, size);
		if (IS_ERR(vma)) {
			/* logging will be off */
			i915.guc_log_level = -1;
			return;
		}

		guc->log.vma = vma;

		if (guc_log_create_extras(guc)) {
			guc_log_cleanup(guc);
			i915_vma_unpin_and_release(&guc->log.vma);
			i915.guc_log_level = -1;
			return;
		}
	}

	/* each allocated unit is a page */
	flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
		(GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
		(GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
		(GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);

	offset = i915_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
	guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
}

static int guc_log_late_setup(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (i915.guc_log_level < 0)
		return -EINVAL;

	/* If log_level was set as -1 at boot time, then setup needed to
	 * handle log buffer flush interrupts would not have been done yet,
	 * so do that now.
	 */
	ret = guc_log_create_extras(guc);
	if (ret)
		goto err;

	ret = guc_log_create_relay_file(guc);
	if (ret)
		goto err;

	return 0;
err:
	guc_log_cleanup(guc);
	/* logging will remain off */
	i915.guc_log_level = -1;
	return ret;
}

static void guc_policies_init(struct guc_policies *policies)
{
	struct guc_policy *policy;
	u32 p, i;

	policies->dpc_promote_time = 500000;
	policies->max_num_work_items = POLICY_MAX_NUM_WI;

	for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++) {
		for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
			policy = &policies->policy[p][i];

			policy->execution_quantum = 1000000;
			policy->preemption_time = 500000;
			policy->fault_time = 250000;
			policy->policy_flags = 0;
		}
	}

	policies->is_valid = 1;
}

static void guc_addon_create(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct i915_vma *vma;
	struct guc_ads *ads;
	struct guc_policies *policies;
	struct guc_mmio_reg_state *reg_state;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct page *page;
	u32 size;

	/* The ads obj includes the struct itself and buffers passed to GuC */
	size = sizeof(struct guc_ads) + sizeof(struct guc_policies) +
			sizeof(struct guc_mmio_reg_state) +
			GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE;

	vma = guc->ads_vma;
	if (!vma) {
		vma = guc_allocate_vma(guc, PAGE_ALIGN(size));
		if (IS_ERR(vma))
			return;

		guc->ads_vma = vma;
	}

	page = i915_vma_first_page(vma);
	ads = kmap(page);

	/*
	 * The GuC requires a "Golden Context" when it reinitialises
	 * engines after a reset. Here we use the Render ring default
	 * context, which must already exist and be pinned in the GGTT,
	 * so its address won't change after we've told the GuC where
	 * to find it.
	 */
	engine = dev_priv->engine[RCS];
	ads->golden_context_lrca = engine->status_page.ggtt_offset;

	for_each_engine(engine, dev_priv, id)
		ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine);

	/* GuC scheduling policies */
	policies = (void *)ads + sizeof(struct guc_ads);
	guc_policies_init(policies);

	ads->scheduler_policies =
		i915_ggtt_offset(vma) + sizeof(struct guc_ads);

	/* MMIO reg state */
	reg_state = (void *)policies + sizeof(struct guc_policies);

	for_each_engine(engine, dev_priv, id) {
		reg_state->mmio_white_list[engine->guc_id].mmio_start =
			engine->mmio_base + GUC_MMIO_WHITE_LIST_START;

		/* Nothing to be saved or restored for now. */
		reg_state->mmio_white_list[engine->guc_id].count = 0;
	}

	ads->reg_state_addr = ads->scheduler_policies +
			sizeof(struct guc_policies);

	ads->reg_state_buffer = ads->reg_state_addr +
			sizeof(struct guc_mmio_reg_state);

	kunmap(page);
}
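
/*
 * Resulting layout of the ADS object populated above (one contiguous
 * allocation; the offsets follow directly from the sizeof() arithmetic):
 *
 *	+----------------------------+ <- start of object
 *	| struct guc_ads             |
 *	+----------------------------+ <- ads->scheduler_policies
 *	| struct guc_policies        |
 *	+----------------------------+ <- ads->reg_state_addr
 *	| struct guc_mmio_reg_state  |
 *	+----------------------------+ <- ads->reg_state_buffer
 *	| GUC_S3_SAVE_SPACE_PAGES    |
 *	+----------------------------+
 */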

/*
 * Set up the memory resources to be shared with the GuC.  At this point,
 * we require just one object that can be mapped through the GGTT.
 */
int i915_guc_submission_init(struct drm_i915_private *dev_priv)
{
	const size_t ctxsize = sizeof(struct guc_context_desc);
	const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize;
	const size_t gemsize = round_up(poolsize, PAGE_SIZE);
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_vma *vma;

	if (!HAS_GUC_SCHED(dev_priv))
		return 0;

	/* Wipe bitmap & delete client in case of reinitialisation */
	bitmap_clear(guc->doorbell_bitmap, 0, GUC_MAX_DOORBELLS);
	i915_guc_submission_disable(dev_priv);

	if (!i915.enable_guc_submission)
		return 0; /* not enabled */

	if (guc->ctx_pool_vma)
		return 0; /* already allocated */

	vma = guc_allocate_vma(guc, gemsize);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	guc->ctx_pool_vma = vma;
	ida_init(&guc->ctx_ids);
	guc_log_create(guc);
	guc_addon_create(guc);

	guc->execbuf_client = guc_client_alloc(dev_priv,
					       INTEL_INFO(dev_priv)->ring_mask,
					       GUC_CTX_PRIORITY_KMD_NORMAL,
					       dev_priv->kernel_context);
	if (!guc->execbuf_client) {
		DRM_ERROR("Failed to create GuC client for execbuf!\n");
		goto err;
	}

	return 0;

err:
	i915_guc_submission_fini(dev_priv);
	return -ENOMEM;
}

static void guc_reset_wq(struct i915_guc_client *gc)
{
	struct guc_process_desc *desc = gc->vaddr + gc->proc_desc_offset;

	desc->head = 0;
	desc->tail = 0;

	gc->wq_tail = 0;
}

int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_guc_client *client = guc->execbuf_client;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (!client)
		return -ENODEV;

	intel_guc_sample_forcewake(guc);

	guc_reset_wq(client);
	guc_init_doorbell_hw(guc);

	/* Take over from manual control of ELSP (execlists) */
	for_each_engine(engine, dev_priv, id) {
		struct drm_i915_gem_request *rq;

		engine->submit_request = i915_guc_submit;
		engine->schedule = NULL;

		/* Replay the current set of previously submitted requests */
		list_for_each_entry(rq, &engine->timeline->requests, link) {
			client->wq_rsvd += sizeof(struct guc_wq_item);
			__i915_guc_submit(rq);
		}
	}

	return 0;
}

void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;

	if (!guc->execbuf_client)
		return;

	/* Revert back to manual ELSP submission */
	intel_execlists_enable_submission(dev_priv);
}

void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_guc_client *client;

	client = fetch_and_zero(&guc->execbuf_client);
	if (!client)
		return;

	guc_client_free(dev_priv, client);

	i915_vma_unpin_and_release(&guc->ads_vma);
	i915_vma_unpin_and_release(&guc->log.vma);

	if (guc->ctx_pool_vma)
		ida_destroy(&guc->ctx_ids);
	i915_vma_unpin_and_release(&guc->ctx_pool_vma);
}

/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @dev:	drm device
 */
int intel_guc_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_gem_context *ctx;
	u32 data[3];

	if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
		return 0;

	gen9_disable_guc_interrupts(dev_priv);

	ctx = dev_priv->kernel_context;

	data[0] = INTEL_GUC_ACTION_ENTER_S_STATE;
	/* any value greater than GUC_POWER_D0 */
	data[1] = GUC_POWER_D1;
	/* first page is shared data with GuC */
	data[2] = i915_ggtt_offset(ctx->engine[RCS].state);

	return intel_guc_send(guc, data, ARRAY_SIZE(data));
}


/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @dev:	drm device
 */
int intel_guc_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_gem_context *ctx;
	u32 data[3];

	if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
		return 0;

	if (i915.guc_log_level >= 0)
		gen9_enable_guc_interrupts(dev_priv);

	ctx = dev_priv->kernel_context;

	data[0] = INTEL_GUC_ACTION_EXIT_S_STATE;
	data[1] = GUC_POWER_D0;
	/* first page is shared data with GuC */
	data[2] = i915_ggtt_offset(ctx->engine[RCS].state);

	return intel_guc_send(guc, data, ARRAY_SIZE(data));
}

void i915_guc_capture_logs(struct drm_i915_private *dev_priv)
{
	guc_read_update_log_buffer(&dev_priv->guc);

	/* Generally device is expected to be active only at this
	 * time, so get/put should be really quick.
	 */
	intel_runtime_pm_get(dev_priv);
	intel_guc_log_flush_complete(&dev_priv->guc);
	intel_runtime_pm_put(dev_priv);
}

void i915_guc_flush_logs(struct drm_i915_private *dev_priv)
{
	if (!i915.enable_guc_submission || (i915.guc_log_level < 0))
		return;

	/* First disable the interrupts, will be re-enabled afterwards */
	gen9_disable_guc_interrupts(dev_priv);

	/* Before initiating the forceful flush, wait for any pending/ongoing
	 * flush to complete otherwise forceful flush may not actually happen.
	 */
	flush_work(&dev_priv->guc.log.flush_work);

	/* Ask GuC to update the log buffer state */
	intel_guc_log_flush(&dev_priv->guc);

	/* GuC would have updated log buffer by now, so capture it */
	i915_guc_capture_logs(dev_priv);
}

void i915_guc_unregister(struct drm_i915_private *dev_priv)
{
	if (!i915.enable_guc_submission)
		return;

	mutex_lock(&dev_priv->drm.struct_mutex);
	guc_log_cleanup(&dev_priv->guc);
	mutex_unlock(&dev_priv->drm.struct_mutex);
}

void i915_guc_register(struct drm_i915_private *dev_priv)
{
	if (!i915.enable_guc_submission)
		return;

	mutex_lock(&dev_priv->drm.struct_mutex);
	guc_log_late_setup(&dev_priv->guc);
	mutex_unlock(&dev_priv->drm.struct_mutex);
}

int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
{
	union guc_log_control log_param;
	int ret;

	log_param.value = control_val;

	if (log_param.verbosity < GUC_LOG_VERBOSITY_MIN ||
	    log_param.verbosity > GUC_LOG_VERBOSITY_MAX)
		return -EINVAL;

	/* This combination doesn't make sense & won't have any effect */
	if (!log_param.logging_enabled && (i915.guc_log_level < 0))
		return 0;

	ret = intel_guc_log_control(&dev_priv->guc, log_param.value);
	if (ret < 0) {
		DRM_DEBUG_DRIVER("guc_logging_control action failed %d\n", ret);
		return ret;
	}

	i915.guc_log_level = log_param.verbosity;

	/* If log_level was set as -1 at boot time, then the relay channel file
	 * wouldn't have been created by now and interrupts also would not have
	 * been enabled.
	 */
	if (!dev_priv->guc.log.relay_chan) {
		ret = guc_log_late_setup(&dev_priv->guc);
		if (!ret)
			gen9_enable_guc_interrupts(dev_priv);
	} else if (!log_param.logging_enabled) {
		/* Once logging is disabled, GuC won't generate logs & send an
		 * interrupt. But there could be some data in the log buffer
		 * which is yet to be captured. So request GuC to update the log
		 * buffer state and then collect the left over logs.
		 */
		i915_guc_flush_logs(dev_priv);

		/* As logging is disabled, update log level to reflect that */
		i915.guc_log_level = -1;
	} else {
		/* In case interrupts were disabled, enable them now */
		gen9_enable_guc_interrupts(dev_priv);
	}

	return ret;
}