// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"

/**
 * DOC: GuC
 *
 * The GuC is a microcontroller inside the GT HW, introduced in gen9. The GuC is
 * designed to offload some of the functionality usually performed by the host
 * driver; currently the main operations it can take care of are:
 *
 * - Authentication of the HuC, which is required to fully enable HuC usage.
 * - Low latency graphics context scheduling (a.k.a. GuC submission).
 * - GT Power management.
 *
 * The enable_guc module parameter can be used to select which of those
 * operations to enable within GuC. Note that not all the operations are
 * supported on all gen9+ platforms.
 *
 * Enabling the GuC is not mandatory and therefore the firmware is only loaded
 * if at least one of the operations is selected. However, not loading the GuC
 * might result in the loss of some features that do require the GuC (currently
 * just the HuC, but more are expected to land in the future).
 */

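/*
 * Illustrative example (not taken from this file): assuming the usual i915
 * enable_guc bit encoding of this era (bit 0 selects GuC submission, bit 1
 * selects HuC loading), typical choices are:
 *
 *	i915.enable_guc=0	leave GuC/HuC disabled (no firmware loaded)
 *	i915.enable_guc=2	load GuC only to authenticate the HuC
 *	i915.enable_guc=3	HuC loading plus GuC submission
 *	i915.enable_guc=-1	let the driver pick a per-platform default
 */
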
void intel_guc_notify(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	/*
	 * On Gen11+, the value written to the register is passed as a payload
	 * to the FW. However, the FW currently treats all values the same way
	 * (H2G interrupt), so we can just write the value that the HW expects
	 * on older gens.
	 */
	intel_uncore_write(gt->uncore, guc->notify_reg, GUC_SEND_TRIGGER);
}

static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);
	GEM_BUG_ON(i >= guc->send_regs.count);

	return _MMIO(guc->send_regs.base + 4 * i);
}

void intel_guc_init_send_regs(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	enum forcewake_domains fw_domains = 0;
	unsigned int i;

	if (INTEL_GEN(gt->i915) >= 11) {
		guc->send_regs.base =
				i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
		guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
	} else {
		guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
		guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
		BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
	}

	for (i = 0; i < guc->send_regs.count; i++) {
		fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore,
					guc_send_reg(guc, i),
					FW_REG_READ | FW_REG_WRITE);
	}
	guc->send_regs.fw_domains = fw_domains;
}

static void gen9_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
	spin_unlock_irq(&gt->irq_lock);
}

static void gen9_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);
	if (!guc->interrupts.enabled) {
		WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
			     gt->pm_guc_events);
		guc->interrupts.enabled = true;
		gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
	}
	spin_unlock_irq(&gt->irq_lock);
}

static void gen9_disable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);
	guc->interrupts.enabled = false;

	gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);

	spin_unlock_irq(&gt->irq_lock);
	intel_synchronize_irq(gt->i915);

	gen9_reset_guc_interrupts(guc);
}

static void gen11_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);
	gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
	spin_unlock_irq(&gt->irq_lock);
}

static void gen11_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);
	if (!guc->interrupts.enabled) {
		u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);

		WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
		intel_uncore_write(gt->uncore,
				   GEN11_GUC_SG_INTR_ENABLE, events);
		intel_uncore_write(gt->uncore,
				   GEN11_GUC_SG_INTR_MASK, ~events);
		guc->interrupts.enabled = true;
	}
	spin_unlock_irq(&gt->irq_lock);
}

static void gen11_disable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);
	guc->interrupts.enabled = false;

	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);

	spin_unlock_irq(&gt->irq_lock);
	intel_synchronize_irq(gt->i915);

	gen11_reset_guc_interrupts(guc);
}

void intel_guc_init_early(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	intel_guc_fw_init_early(guc);
	intel_guc_ct_init_early(&guc->ct);
	intel_guc_log_init_early(&guc->log);
	intel_guc_submission_init_early(guc);

	mutex_init(&guc->send_mutex);
	spin_lock_init(&guc->irq_lock);
	if (INTEL_GEN(i915) >= 11) {
		guc->notify_reg = GEN11_GUC_HOST_INTERRUPT;
		guc->interrupts.reset = gen11_reset_guc_interrupts;
		guc->interrupts.enable = gen11_enable_guc_interrupts;
		guc->interrupts.disable = gen11_disable_guc_interrupts;
	} else {
		guc->notify_reg = GUC_SEND_INTERRUPT;
		guc->interrupts.reset = gen9_reset_guc_interrupts;
		guc->interrupts.enable = gen9_enable_guc_interrupts;
		guc->interrupts.disable = gen9_disable_guc_interrupts;
	}
}

static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
	u32 level = intel_guc_log_get_level(&guc->log);
	u32 flags = 0;

	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
		flags |= GUC_LOG_DISABLED;
	else
		flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
			 GUC_LOG_VERBOSITY_SHIFT;

	return flags;
}

static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	if (!intel_guc_is_submission_supported(guc))
		flags |= GUC_CTL_DISABLE_SCHEDULER;

	return flags;
}

static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	if (intel_guc_is_submission_supported(guc)) {
		u32 ctxnum, base;

		base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
		ctxnum = GUC_MAX_STAGE_DESCRIPTORS / 16;

		base >>= PAGE_SHIFT;
		flags |= (base << GUC_CTL_BASE_ADDR_SHIFT) |
			(ctxnum << GUC_CTL_CTXNUM_IN16_SHIFT);
	}
	return flags;
}

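/*
 * Worked example (the value is an assumption for illustration, not taken
 * from this file): if GUC_MAX_STAGE_DESCRIPTORS were 1024, ctxnum would be
 * 1024 / 16 = 64, i.e. the stage descriptor count is reported to the GuC in
 * units of 16, packed alongside the page-aligned GGTT offset of the
 * descriptor pool.
 */
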
static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
{
	u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
	u32 flags;

	#if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
	#define UNIT SZ_1M
	#define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
	#else
	#define UNIT SZ_4K
	#define FLAG 0
	#endif

	BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!DPC_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(DPC_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!ISR_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(ISR_BUFFER_SIZE, UNIT));

	BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
	BUILD_BUG_ON((DPC_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_DPC_MASK >> GUC_LOG_DPC_SHIFT));
	BUILD_BUG_ON((ISR_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_ISR_MASK >> GUC_LOG_ISR_SHIFT));

	flags = GUC_LOG_VALID |
		GUC_LOG_NOTIFY_ON_HALF_FULL |
		FLAG |
		((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
		((DPC_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DPC_SHIFT) |
		((ISR_BUFFER_SIZE / UNIT - 1) << GUC_LOG_ISR_SHIFT) |
		(offset << GUC_LOG_BUF_ADDR_SHIFT);

	#undef UNIT
	#undef FLAG

	return flags;
}

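/*
 * Worked example (the sizes are assumptions for illustration, not taken from
 * this file): if CRASH_BUFFER_SIZE were SZ_8K, it is not a multiple of SZ_1M,
 * so UNIT is SZ_4K, FLAG is 0 and the encoded crash-buffer field is
 * 8K / 4K - 1 = 1. If CRASH_BUFFER_SIZE were instead a multiple of SZ_1M,
 * UNIT would be SZ_1M and GUC_LOG_ALLOC_IN_MEGABYTE would tell the firmware
 * to interpret all three size fields in megabytes.
 */
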
static u32 guc_ctl_ads_flags(struct intel_guc *guc)
{
	u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
	u32 flags = ads << GUC_ADS_ADDR_SHIFT;

	return flags;
}

/*
 * Initialise the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
static void guc_init_params(struct intel_guc *guc)
{
	u32 *params = guc->params;
	int i;

	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));

	params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);
	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
}

/*
 * Initialise the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
void intel_guc_write_params(struct intel_guc *guc)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	int i;

	/*
	 * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
	 * they are power context saved so it's ok to release forcewake
	 * when we are done here and take it again at xfer time.
	 */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_BLITTER);

	intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);

	intel_uncore_forcewake_put(uncore, FORCEWAKE_BLITTER);
}

int intel_guc_init(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	int ret;

	ret = intel_uc_fw_init(&guc->fw);
	if (ret)
		goto err_fetch;

	ret = intel_guc_log_create(&guc->log);
	if (ret)
		goto err_fw;

	ret = intel_guc_ads_create(guc);
	if (ret)
		goto err_log;
	GEM_BUG_ON(!guc->ads_vma);

	ret = intel_guc_ct_init(&guc->ct);
	if (ret)
		goto err_ads;

	if (intel_guc_is_submission_supported(guc)) {
		/*
		 * This is stuff we need to have available at fw load time
		 * if we are planning to enable submission later
		 */
		ret = intel_guc_submission_init(guc);
		if (ret)
			goto err_ct;
	}

	/* now that everything is perma-pinned, initialize the parameters */
	guc_init_params(guc);

	/* We need to notify the guc whenever we change the GGTT */
	i915_ggtt_enable_guc(gt->ggtt);

	return 0;

err_ct:
	intel_guc_ct_fini(&guc->ct);
err_ads:
	intel_guc_ads_destroy(guc);
err_log:
	intel_guc_log_destroy(&guc->log);
err_fw:
	intel_uc_fw_fini(&guc->fw);
err_fetch:
	intel_uc_fw_cleanup_fetch(&guc->fw);
	DRM_DEV_DEBUG_DRIVER(gt->i915->drm.dev, "failed with %d\n", ret);
	return ret;
}

void intel_guc_fini(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	if (!intel_uc_fw_is_available(&guc->fw))
		return;

	i915_ggtt_disable_guc(gt->ggtt);

	if (intel_guc_is_submission_supported(guc))
		intel_guc_submission_fini(guc);

	intel_guc_ct_fini(&guc->ct);

	intel_guc_ads_destroy(guc);
	intel_guc_log_destroy(&guc->log);
	intel_uc_fw_fini(&guc->fw);
	intel_uc_fw_cleanup_fetch(&guc->fw);
}

/*
 * This function implements the MMIO based host to GuC interface.
 */
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
			u32 *response_buf, u32 response_buf_size)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	u32 status;
	int i;
	int ret;

	GEM_BUG_ON(!len);
	GEM_BUG_ON(len > guc->send_regs.count);

	/* We expect only action code */
	GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK);

	/* If CT is available, we expect to use MMIO only during init/fini */
	GEM_BUG_ON(*action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
		   *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);

	mutex_lock(&guc->send_mutex);
	intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);

	for (i = 0; i < len; i++)
		intel_uncore_write(uncore, guc_send_reg(guc, i), action[i]);

	intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));

	intel_guc_notify(guc);

	/*
	 * No GuC command should ever take longer than 10ms.
	 * Fast commands should still complete in 10us.
	 */
	ret = __intel_wait_for_register_fw(uncore,
					   guc_send_reg(guc, 0),
					   INTEL_GUC_MSG_TYPE_MASK,
					   INTEL_GUC_MSG_TYPE_RESPONSE <<
					   INTEL_GUC_MSG_TYPE_SHIFT,
					   10, 10, &status);
	/* If GuC explicitly returned an error, convert it to -EIO */
	if (!ret && !INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(status))
		ret = -EIO;

	if (ret) {
		DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n",
			  action[0], ret, status);
		goto out;
	}

	if (response_buf) {
		int count = min(response_buf_size, guc->send_regs.count - 1);

		for (i = 0; i < count; i++)
			response_buf[i] = intel_uncore_read(uncore,
							    guc_send_reg(guc, i + 1));
	}

	/* Use data from the GuC response as our return value */
	ret = INTEL_GUC_MSG_TO_DATA(status);

out:
	intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
	mutex_unlock(&guc->send_mutex);

	return ret;
}
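
/*
 * Usage sketch (illustrative only; the parameter names below are made up):
 * callers build a dword array whose first dword is the action code, followed
 * by any action-specific parameters. Per the GEM_BUG_ONs above, in this file
 * that is only expected for the CT buffer (de)registration actions used
 * around init/fini, e.g.:
 *
 *	u32 action[] = {
 *		INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER,
 *		desc_ggtt_addr,		(hypothetical parameter)
 *		type,			(hypothetical parameter)
 *	};
 *
 *	err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
 */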

int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len)
{
	u32 msg;

	if (unlikely(!len))
		return -EPROTO;

	/* Make sure to handle only enabled messages */
	msg = payload[0] & guc->msg_enabled_mask;

	if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
		   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
		intel_guc_log_handle_flush_event(&guc->log);

	return 0;
}

int intel_guc_sample_forcewake(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	u32 action[2];

	action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
	/* WaRsDisableCoarsePowerGating:skl,cnl */
	if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
		action[1] = 0;
	else
		/* bit 0 and 1 are for Render and Media domain separately */
		action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
 * @guc: intel_guc structure
 * @rsa_offset: rsa offset w.r.t ggtt base of huc vma
 *
 * Triggers a HuC firmware authentication request to the GuC via intel_guc_send
 * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function is invoked by
 * intel_huc_auth().
 *
 * Return:	non-zero code on error
 */
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
	u32 action[] = {
		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_offset
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @guc:	the guc
 */
int intel_guc_suspend(struct intel_guc *guc)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	int ret;
	u32 status;
	u32 action[] = {
		INTEL_GUC_ACTION_ENTER_S_STATE,
		GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */
	};

	/*
	 * If GuC communication is enabled but submission is not supported,
	 * we do not need to suspend the GuC.
	 */
	if (!intel_guc_submission_is_enabled(guc))
		return 0;

	/*
	 * The ENTER_S_STATE action queues the save/restore operation in GuC FW
	 * and then returns, so waiting on the H2G is not enough to guarantee
	 * GuC is done. When all the processing is done, GuC writes
	 * INTEL_GUC_SLEEP_STATE_SUCCESS to scratch register 14, so we can poll
	 * on that. Note that GuC does not ensure that the value in the register
	 * is different from INTEL_GUC_SLEEP_STATE_SUCCESS while the action is
	 * in progress so we need to take care of that ourselves as well.
	 */

	intel_uncore_write(uncore, SOFT_SCRATCH(14),
			   INTEL_GUC_SLEEP_STATE_INVALID_MASK);

	ret = intel_guc_send(guc, action, ARRAY_SIZE(action));
	if (ret)
		return ret;

	ret = __intel_wait_for_register(uncore, SOFT_SCRATCH(14),
					INTEL_GUC_SLEEP_STATE_INVALID_MASK,
					0, 0, 10, &status);
	if (ret)
		return ret;

	if (status != INTEL_GUC_SLEEP_STATE_SUCCESS) {
		DRM_ERROR("GuC failed to change sleep state. "
			  "action=0x%x, err=%u\n",
			  action[0], status);
		return -EIO;
	}

	return 0;
}

/**
 * intel_guc_reset_engine() - ask GuC to reset an engine
 * @guc:	intel_guc structure
 * @engine:	engine to be reset
 */
int intel_guc_reset_engine(struct intel_guc *guc,
			   struct intel_engine_cs *engine)
{
	/* XXX: to be implemented with submission interface rework */

	return -ENODEV;
}

/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @guc:	the guc
 */
int intel_guc_resume(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_EXIT_S_STATE,
		GUC_POWER_D0,
	};

	/*
	 * If GuC communication is enabled but submission is not supported,
	 * we do not need to resume the GuC but we do need to enable the
	 * GuC communication on resume (above).
	 */
	if (!intel_guc_submission_is_enabled(guc))
		return 0;

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * DOC: GuC Memory Management
 *
 * GuC can't allocate any memory for its own usage, so all the allocations must
 * be handled by the host driver. GuC accesses the memory via the GGTT, with the
 * exception of the top and bottom parts of the 4GB address space, which are
 * instead re-mapped by the GuC HW to the memory location of the FW itself (WOPCM)
 * or other parts of the HW. The driver must take care not to place objects that
 * the GuC is going to access in these reserved ranges. The layout of the GuC
 * address space is shown below:
 *
 * ::
 *
 *     +===========> +====================+ <== FFFF_FFFF
 *     ^             |      Reserved      |
 *     |             +====================+ <== GUC_GGTT_TOP
 *     |             |                    |
 *     |             |        DRAM        |
 *    GuC            |                    |
 *  Address    +===> +====================+ <== GuC ggtt_pin_bias
 *   Space     ^     |                    |
 *     |       |     |                    |
 *     |      GuC    |        GuC         |
 *     |     WOPCM   |       WOPCM        |
 *     |      Size   |                    |
 *     |       |     |                    |
 *     v       v     |                    |
 *     +=======+===> +====================+ <== 0000_0000
 *
 * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to GuC WOPCM
 * while the upper part of GuC Address Space [ggtt_pin_bias, GUC_GGTT_TOP) is mapped
 * to DRAM. The value of the GuC ggtt_pin_bias is the GuC WOPCM size.
 */

/**
 * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
 * @guc:	the guc
 * @size:	size of area to allocate (both virtual space and memory)
 *
 * This is a wrapper to create an object for use with the GuC. In order to
 * use it inside the GuC, an object needs to be pinned for its lifetime, so we
 * allocate both some backing storage and a range inside the Global GTT. We
 * must pin it in the GGTT somewhere other than [0, GUC ggtt_pin_bias) because
 * that range is reserved inside GuC.
 *
 * Return:	An i915_vma if successful, otherwise an ERR_PTR.
 */
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u64 flags;
	int ret;

	obj = i915_gem_object_create_shmem(gt->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma))
		goto err;

	flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
	ret = i915_vma_pin(vma, 0, 0, flags);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	return i915_vma_make_unshrinkable(vma);

err:
	i915_gem_object_put(obj);
	return vma;
}

/**
 * intel_guc_allocate_and_map_vma() - Allocate and map VMA for GuC usage
 * @guc:	the guc
 * @size:	size of area to allocate (both virtual space and memory)
 * @out_vma:	return variable for the allocated vma pointer
 * @out_vaddr:	return variable for the obj mapping
 *
 * This wrapper calls intel_guc_allocate_vma() and then maps the allocated
 * object with I915_MAP_WB.
 *
 * Return:	0 if successful, a negative errno code otherwise.
 */
int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
				   struct i915_vma **out_vma, void **out_vaddr)
{
	struct i915_vma *vma;
	void *vaddr;

	vma = intel_guc_allocate_vma(guc, size);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		i915_vma_unpin_and_release(&vma, 0);
		return PTR_ERR(vaddr);
	}

	*out_vma = vma;
	*out_vaddr = vaddr;

	return 0;
}
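
/*
 * Usage sketch (illustrative only; the variable names are made up): a GuC
 * sub-component that needs a buffer shared with the firmware typically does
 * something like the following, and later releases it with
 * i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP):
 *
 *	struct i915_vma *vma;
 *	void *vaddr;
 *	int err;
 *
 *	err = intel_guc_allocate_and_map_vma(guc, SZ_16K, &vma, &vaddr);
 *	if (err)
 *		return err;
 *
 *	(fill the buffer through vaddr; hand its GGTT address, obtained with
 *	 intel_guc_ggtt_offset(guc, vma), to the firmware)
 */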