// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"

/**
 * DOC: GuC
 *
 * The GuC is a microcontroller inside the GT HW, introduced in gen9. The GuC is
 * designed to offload some of the functionality usually performed by the host
 * driver; currently the main operations it can take care of are:
 *
 * - Authentication of the HuC, which is required to fully enable HuC usage.
 * - Low latency graphics context scheduling (a.k.a. GuC submission).
 * - GT Power management.
 *
 * The enable_guc module parameter can be used to select which of those
 * operations to enable within GuC. Note that not all the operations are
 * supported on all gen9+ platforms.
 *
 * Enabling the GuC is not mandatory and therefore the firmware is only loaded
 * if at least one of the operations is selected. However, not loading the GuC
 * might result in the loss of some features that do require the GuC (currently
 * just the HuC, but more are expected to land in the future).
 */

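/*
 * Example (assuming the enable_guc bit definitions in i915_params.h for this
 * kernel: BIT(0) = GuC submission, BIT(1) = HuC load): booting with
 * i915.enable_guc=2 loads the GuC only to authenticate the HuC, while
 * i915.enable_guc=3 also enables GuC submission.
 */
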
static void gen8_guc_raise_irq(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	intel_uncore_write(gt->uncore, GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
}

static void gen11_guc_raise_irq(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	intel_uncore_write(gt->uncore, GEN11_GUC_HOST_INTERRUPT, 0);
}

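/*
 * Host-to-GuC MMIO messages are exchanged through a small bank of scratch
 * registers: guc_send_reg() maps a dword index to the n-th register of the
 * bank, while intel_guc_init_send_regs() selects the bank (GEN11_SOFT_SCRATCH
 * on gen11+, SOFT_SCRATCH otherwise) and caches the forcewake domains needed
 * to access it.
 */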
static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);
	GEM_BUG_ON(i >= guc->send_regs.count);

	return _MMIO(guc->send_regs.base + 4 * i);
}

void intel_guc_init_send_regs(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	enum forcewake_domains fw_domains = 0;
	unsigned int i;

	if (INTEL_GEN(gt->i915) >= 11) {
		guc->send_regs.base =
				i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
		guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
	} else {
		guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
		guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
		BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
	}

	for (i = 0; i < guc->send_regs.count; i++) {
		fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore,
					guc_send_reg(guc, i),
					FW_REG_READ | FW_REG_WRITE);
	}
	guc->send_regs.fw_domains = fw_domains;
}

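/*
 * GuC-to-host interrupts come in two flavours: on gen9 the GuC events are
 * routed through the GT PM interrupt registers (GEN8_GT_IIR(2) and the
 * pm_guc_events mask), while gen11+ has dedicated GUC_SG interrupt enable
 * and mask registers. The reset/enable/disable helpers below are selected
 * in intel_guc_init_early() based on the platform.
 */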
static void gen9_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
	spin_unlock_irq(&gt->irq_lock);
}

static void gen9_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);
	if (!guc->interrupts.enabled) {
		WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
			     gt->pm_guc_events);
		guc->interrupts.enabled = true;
		gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
	}
	spin_unlock_irq(&gt->irq_lock);
}

static void gen9_disable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);
	guc->interrupts.enabled = false;

	gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);

	spin_unlock_irq(&gt->irq_lock);
	intel_synchronize_irq(gt->i915);

	gen9_reset_guc_interrupts(guc);
}

static void gen11_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);
	gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
	spin_unlock_irq(&gt->irq_lock);
}

static void gen11_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);
	if (!guc->interrupts.enabled) {
		u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);

		WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
		intel_uncore_write(gt->uncore,
				   GEN11_GUC_SG_INTR_ENABLE, events);
		intel_uncore_write(gt->uncore,
				   GEN11_GUC_SG_INTR_MASK, ~events);
		guc->interrupts.enabled = true;
	}
	spin_unlock_irq(&gt->irq_lock);
}

static void gen11_disable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);
	guc->interrupts.enabled = false;

	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);

	spin_unlock_irq(&gt->irq_lock);
	intel_synchronize_irq(gt->i915);

	gen11_reset_guc_interrupts(guc);
}

void intel_guc_init_early(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	intel_guc_fw_init_early(guc);
	intel_guc_ct_init_early(&guc->ct);
	intel_guc_log_init_early(&guc->log);
	intel_guc_submission_init_early(guc);

	mutex_init(&guc->send_mutex);
	spin_lock_init(&guc->irq_lock);
	guc->send = intel_guc_send_nop;
	guc->handler = intel_guc_to_host_event_handler_nop;
	if (INTEL_GEN(i915) >= 11) {
		guc->notify = gen11_guc_raise_irq;
		guc->interrupts.reset = gen11_reset_guc_interrupts;
		guc->interrupts.enable = gen11_enable_guc_interrupts;
		guc->interrupts.disable = gen11_disable_guc_interrupts;
	} else {
		guc->notify = gen8_guc_raise_irq;
		guc->interrupts.reset = gen9_reset_guc_interrupts;
		guc->interrupts.enable = gen9_enable_guc_interrupts;
		guc->interrupts.disable = gen9_disable_guc_interrupts;
	}
}

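/*
 * The guc_ctl_*_flags() helpers below each build one GUC_CTL_* dword of the
 * boot parameter block. guc_init_params() gathers them into guc->params and
 * intel_guc_write_params() then hands the block to the firmware via the
 * SOFT_SCRATCH registers.
 */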
static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
	u32 level = intel_guc_log_get_level(&guc->log);
	u32 flags = 0;

	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
		flags |= GUC_LOG_DISABLED;
	else
		flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
			 GUC_LOG_VERBOSITY_SHIFT;

	return flags;
}

static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	if (!intel_guc_is_submission_supported(guc))
		flags |= GUC_CTL_DISABLE_SCHEDULER;

	return flags;
}

static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	if (intel_guc_is_submission_supported(guc)) {
		u32 ctxnum, base;

		base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
		ctxnum = GUC_MAX_STAGE_DESCRIPTORS / 16;

		base >>= PAGE_SHIFT;
		flags |= (base << GUC_CTL_BASE_ADDR_SHIFT) |
			(ctxnum << GUC_CTL_CTXNUM_IN16_SHIFT);
	}
	return flags;
}

static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
{
	u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
	u32 flags;

	#if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
	#define UNIT SZ_1M
	#define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
	#else
	#define UNIT SZ_4K
	#define FLAG 0
	#endif

	BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!DPC_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(DPC_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!ISR_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(ISR_BUFFER_SIZE, UNIT));

	BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
	BUILD_BUG_ON((DPC_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_DPC_MASK >> GUC_LOG_DPC_SHIFT));
	BUILD_BUG_ON((ISR_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_ISR_MASK >> GUC_LOG_ISR_SHIFT));

	flags = GUC_LOG_VALID |
		GUC_LOG_NOTIFY_ON_HALF_FULL |
		FLAG |
		((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
		((DPC_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DPC_SHIFT) |
		((ISR_BUFFER_SIZE / UNIT - 1) << GUC_LOG_ISR_SHIFT) |
		(offset << GUC_LOG_BUF_ADDR_SHIFT);

	#undef UNIT
	#undef FLAG

	return flags;
}

static u32 guc_ctl_ads_flags(struct intel_guc *guc)
{
	u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
	u32 flags = ads << GUC_ADS_ADDR_SHIFT;

	return flags;
}

/*
 * Initialise the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
static void guc_init_params(struct intel_guc *guc)
{
	u32 *params = guc->params;
	int i;

	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));

	params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);
	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
}

/*
 * Write the GuC parameter block into the SOFT_SCRATCH registers, from where
 * the firmware reads it on startup. The values cannot be changed once the
 * firmware has booted.
 */
void intel_guc_write_params(struct intel_guc *guc)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	int i;

	/*
	 * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
	 * they are power context saved so it's ok to release forcewake
	 * when we are done here and take it again at xfer time.
	 */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_BLITTER);

	intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);

	intel_uncore_forcewake_put(uncore, FORCEWAKE_BLITTER);
}

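/*
 * intel_guc_init() pins everything the firmware will need before it is
 * loaded: the firmware image itself, the log buffer, the ADS, the CT
 * buffers and (if submission is supported) the submission structures.
 * Only once all of those are perma-pinned are the boot parameters computed
 * and GGTT updates wired up to notify the GuC.
 */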
int intel_guc_init(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	int ret;

	ret = intel_uc_fw_init(&guc->fw);
	if (ret)
		goto err_fetch;

	ret = intel_guc_log_create(&guc->log);
	if (ret)
		goto err_fw;

	ret = intel_guc_ads_create(guc);
	if (ret)
		goto err_log;
	GEM_BUG_ON(!guc->ads_vma);

	ret = intel_guc_ct_init(&guc->ct);
	if (ret)
		goto err_ads;

	if (intel_guc_is_submission_supported(guc)) {
		/*
		 * This is stuff we need to have available at fw load time
		 * if we are planning to enable submission later
		 */
		ret = intel_guc_submission_init(guc);
		if (ret)
			goto err_ct;
	}

	/* now that everything is perma-pinned, initialize the parameters */
	guc_init_params(guc);

	/* We need to notify the guc whenever we change the GGTT */
	i915_ggtt_enable_guc(gt->ggtt);

	return 0;

err_ct:
	intel_guc_ct_fini(&guc->ct);
err_ads:
	intel_guc_ads_destroy(guc);
err_log:
	intel_guc_log_destroy(&guc->log);
err_fw:
	intel_uc_fw_fini(&guc->fw);
err_fetch:
	intel_uc_fw_cleanup_fetch(&guc->fw);
	DRM_DEV_DEBUG_DRIVER(gt->i915->drm.dev, "failed with %d\n", ret);
	return ret;
}

void intel_guc_fini(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	if (!intel_uc_fw_is_available(&guc->fw))
		return;

	i915_ggtt_disable_guc(gt->ggtt);

	if (intel_guc_is_submission_supported(guc))
		intel_guc_submission_fini(guc);

	intel_guc_ct_fini(&guc->ct);

	intel_guc_ads_destroy(guc);
	intel_guc_log_destroy(&guc->log);
	intel_uc_fw_fini(&guc->fw);
	intel_uc_fw_cleanup_fetch(&guc->fw);
}

int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
		       u32 *response_buf, u32 response_buf_size)
{
	WARN(1, "Unexpected send: action=%#x\n", *action);
	return -ENODEV;
}

void intel_guc_to_host_event_handler_nop(struct intel_guc *guc)
{
	WARN(1, "Unexpected event: no suitable handler\n");
}

/*
 * This function implements the MMIO based host to GuC interface: the request
 * dwords are written to the send (scratch) registers, the GuC is notified via
 * its doorbell register, and the first send register is then polled for a
 * response message, with any additional response data read back from the
 * remaining registers.
 */
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
			u32 *response_buf, u32 response_buf_size)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	u32 status;
	int i;
	int ret;

	GEM_BUG_ON(!len);
	GEM_BUG_ON(len > guc->send_regs.count);

	/* We expect only action code */
	GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK);

	/* If CT is available, we expect to use MMIO only during init/fini */
	GEM_BUG_ON(*action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
		   *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);

	mutex_lock(&guc->send_mutex);
	intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);

	for (i = 0; i < len; i++)
		intel_uncore_write(uncore, guc_send_reg(guc, i), action[i]);

	intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));

	intel_guc_notify(guc);

	/*
	 * No GuC command should ever take longer than 10ms.
	 * Fast commands should still complete in 10us.
	 */
	ret = __intel_wait_for_register_fw(uncore,
					   guc_send_reg(guc, 0),
					   INTEL_GUC_MSG_TYPE_MASK,
					   INTEL_GUC_MSG_TYPE_RESPONSE <<
					   INTEL_GUC_MSG_TYPE_SHIFT,
					   10, 10, &status);
	/* If GuC explicitly returned an error, convert it to -EIO */
	if (!ret && !INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(status))
		ret = -EIO;

	if (ret) {
		DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n",
			  action[0], ret, status);
		goto out;
	}

	if (response_buf) {
		int count = min(response_buf_size, guc->send_regs.count - 1);

		for (i = 0; i < count; i++)
			response_buf[i] = intel_uncore_read(uncore,
							    guc_send_reg(guc, i + 1));
	}

	/* Use data from the GuC response as our return value */
	ret = INTEL_GUC_MSG_TO_DATA(status);

out:
	intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
	mutex_unlock(&guc->send_mutex);

	return ret;
}

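/*
 * Process a GuC-to-host notification payload. Only message bits enabled in
 * guc->msg_enabled_mask are handled; currently a flush-log-buffer or
 * crash-dump-posted notification simply kicks the GuC log flush handling.
 */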
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len)
{
	u32 msg;

	if (unlikely(!len))
		return -EPROTO;

	/* Make sure to handle only enabled messages */
	msg = payload[0] & guc->msg_enabled_mask;

	if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
		   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
		intel_guc_log_handle_flush_event(&guc->log);

	return 0;
}

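/*
 * intel_guc_sample_forcewake() sends the SAMPLE_FORCEWAKE action to the
 * firmware, reporting the render and media domains unless RC6 is unavailable
 * or WaRsDisableCoarsePowerGating applies, in which case no domains are
 * reported.
 */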
int intel_guc_sample_forcewake(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	u32 action[2];

	action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
	/* WaRsDisableCoarsePowerGating:skl,cnl */
	if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
		action[1] = 0;
	else
		/* bit 0 and 1 are for Render and Media domain separately */
		action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
 * @guc: intel_guc structure
 * @rsa_offset: rsa offset w.r.t ggtt base of huc vma
 *
 * Triggers a HuC firmware authentication request to the GuC via intel_guc_send
 * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function is invoked by
 * intel_huc_auth().
 *
 * Return:	non-zero code on error
 */
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
	u32 action[] = {
		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_offset
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @guc:	the guc
 */
int intel_guc_suspend(struct intel_guc *guc)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	int ret;
	u32 status;
	u32 action[] = {
		INTEL_GUC_ACTION_ENTER_S_STATE,
		GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */
	};

	/*
	 * If GuC communication is enabled but submission is not supported,
	 * we do not need to suspend the GuC.
	 */
	if (!intel_guc_submission_is_enabled(guc))
		return 0;

	/*
	 * The ENTER_S_STATE action queues the save/restore operation in GuC FW
	 * and then returns, so waiting on the H2G is not enough to guarantee
	 * GuC is done. When all the processing is done, GuC writes
	 * INTEL_GUC_SLEEP_STATE_SUCCESS to scratch register 14, so we can poll
	 * on that. Note that GuC does not ensure that the value in the register
	 * is different from INTEL_GUC_SLEEP_STATE_SUCCESS while the action is
	 * in progress so we need to take care of that ourselves as well.
	 */

	intel_uncore_write(uncore, SOFT_SCRATCH(14),
			   INTEL_GUC_SLEEP_STATE_INVALID_MASK);

	ret = intel_guc_send(guc, action, ARRAY_SIZE(action));
	if (ret)
		return ret;

	ret = __intel_wait_for_register(uncore, SOFT_SCRATCH(14),
					INTEL_GUC_SLEEP_STATE_INVALID_MASK,
					0, 0, 10, &status);
	if (ret)
		return ret;

	if (status != INTEL_GUC_SLEEP_STATE_SUCCESS) {
		DRM_ERROR("GuC failed to change sleep state. "
			  "action=0x%x, err=%u\n",
			  action[0], status);
		return -EIO;
	}

	return 0;
}

/**
 * intel_guc_reset_engine() - ask GuC to reset an engine
 * @guc:	intel_guc structure
 * @engine:	engine to be reset
 */
int intel_guc_reset_engine(struct intel_guc *guc,
			   struct intel_engine_cs *engine)
{
	/* XXX: to be implemented with submission interface rework */

	return -ENODEV;
}

/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @guc:	the guc
 */
int intel_guc_resume(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_EXIT_S_STATE,
		GUC_POWER_D0,
	};

	/*
	 * If GuC communication is enabled but submission is not supported,
	 * we do not need to resume the GuC but we do need to enable the
	 * GuC communication on resume (above).
	 */
	if (!intel_guc_submission_is_enabled(guc))
		return 0;

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * DOC: GuC Memory Management
 *
 * GuC can't allocate any memory for its own usage, so all the allocations must
 * be handled by the host driver. GuC accesses the memory via the GGTT, with the
 * exception of the top and bottom parts of the 4GB address space, which are
 * instead re-mapped by the GuC HW to the memory location of the FW itself
 * (WOPCM) or other parts of the HW. The driver must take care not to place
 * objects that the GuC is going to access in these reserved ranges. The layout
 * of the GuC address space is shown below:
 *
 * ::
 *
 *     +===========> +====================+ <== FFFF_FFFF
 *     ^             |      Reserved      |
 *     |             +====================+ <== GUC_GGTT_TOP
 *     |             |                    |
 *     |             |        DRAM        |
 *    GuC            |                    |
 *  Address    +===> +====================+ <== GuC ggtt_pin_bias
 *   Space     ^     |                    |
 *     |       |     |                    |
 *     |      GuC    |        GuC         |
 *     |     WOPCM   |       WOPCM        |
 *     |      Size   |                    |
 *     |       |     |                    |
 *     v       v     |                    |
 *     +=======+===> +====================+ <== 0000_0000
 *
 * The lower part of the GuC Address Space [0, ggtt_pin_bias) is mapped to the
 * GuC WOPCM, while the upper part [ggtt_pin_bias, GUC_GGTT_TOP) is mapped to
 * DRAM. The value of the GuC ggtt_pin_bias is the GuC WOPCM size.
 */

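/*
 * The reserved [0, ggtt_pin_bias) range described above is kept clear by
 * intel_guc_allocate_vma() below, which pins GuC objects with
 * PIN_OFFSET_BIAS set to i915_ggtt_pin_bias().
 */
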
/**
 * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
 * @guc:	the guc
 * @size:	size of area to allocate (both virtual space and memory)
 *
 * This is a wrapper to create an object for use with the GuC. In order to
 * use it inside the GuC, an object needs to be pinned for its whole lifetime,
 * so we allocate both some backing storage and a range inside the Global GTT.
 * We must pin it in the GGTT somewhere other than [0, GUC ggtt_pin_bias)
 * because that range is reserved inside GuC.
 *
 * Return:	An i915_vma if successful, otherwise an ERR_PTR.
 */
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u64 flags;
	int ret;

	obj = i915_gem_object_create_shmem(gt->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma))
		goto err;

	flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
	ret = i915_vma_pin(vma, 0, 0, flags);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	return i915_vma_make_unshrinkable(vma);

err:
	i915_gem_object_put(obj);
	return vma;
}