// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"

/**
 * DOC: GuC
 *
 * The GuC is a microcontroller inside the GT HW, introduced in gen9. The GuC is
 * designed to offload some of the functionality usually performed by the host
 * driver; currently the main operations it can take care of are:
 *
 * - Authentication of the HuC, which is required to fully enable HuC usage.
 * - Low latency graphics context scheduling (a.k.a. GuC submission).
 * - GT Power management.
 *
 * The enable_guc module parameter can be used to select which of those
 * operations to enable within GuC. Note that not all the operations are
 * supported on all gen9+ platforms.
 *
 * Enabling the GuC is not mandatory and therefore the firmware is only loaded
 * if at least one of the operations is selected. However, not loading the GuC
 * might result in the loss of some features that do require the GuC (currently
 * just the HuC, but more are expected to land in the future).
 */
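
/*
 * Illustrative sketch (not part of the upstream driver): the rest of the uc
 * layer is expected to gate GuC-dependent paths on query helpers such as
 * intel_guc_is_wanted() and intel_guc_submission_is_used() (both used later
 * in this file) rather than reading the enable_guc modparam directly. The
 * function below is hypothetical and only shows that pattern.
 */
static void __maybe_unused example_guc_feature_gating(struct intel_guc *guc)
{
	/* Nothing to do if GuC firmware is not wanted on this platform. */
	if (!intel_guc_is_wanted(guc))
		return;

	/* Scheduling offload additionally requires GuC submission to be selected. */
	if (intel_guc_submission_is_used(guc))
		DRM_DEBUG_DRIVER("GuC submission selected\n");
}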

void intel_guc_notify(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	/*
	 * On Gen11+, the value written to the register is passed as a payload
	 * to the FW. However, the FW currently treats all values the same way
	 * (H2G interrupt), so we can just write the value that the HW expects
	 * on older gens.
	 */
	intel_uncore_write(gt->uncore, guc->notify_reg, GUC_SEND_TRIGGER);
}

static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);
	GEM_BUG_ON(i >= guc->send_regs.count);

	return _MMIO(guc->send_regs.base + 4 * i);
}

void intel_guc_init_send_regs(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	enum forcewake_domains fw_domains = 0;
	unsigned int i;

	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);

	for (i = 0; i < guc->send_regs.count; i++) {
		fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore,
					guc_send_reg(guc, i),
					FW_REG_READ | FW_REG_WRITE);
	}
	guc->send_regs.fw_domains = fw_domains;
}

static void gen9_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
	spin_unlock_irq(&gt->irq_lock);
}

static void gen9_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);
	WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
		     gt->pm_guc_events);
	gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
	spin_unlock_irq(&gt->irq_lock);
}

static void gen9_disable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);

	gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);

	spin_unlock_irq(&gt->irq_lock);
	intel_synchronize_irq(gt->i915);

	gen9_reset_guc_interrupts(guc);
}

static void gen11_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);
	gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
	spin_unlock_irq(&gt->irq_lock);
}

static void gen11_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);

	spin_lock_irq(&gt->irq_lock);
	WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
	intel_uncore_write(gt->uncore,
			   GEN11_GUC_SG_INTR_ENABLE, events);
	intel_uncore_write(gt->uncore,
			   GEN11_GUC_SG_INTR_MASK, ~events);
	spin_unlock_irq(&gt->irq_lock);
}

static void gen11_disable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);

	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);

	spin_unlock_irq(&gt->irq_lock);
	intel_synchronize_irq(gt->i915);

	gen11_reset_guc_interrupts(guc);
}

void intel_guc_init_early(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC);
	intel_guc_ct_init_early(&guc->ct);
	intel_guc_log_init_early(&guc->log);
	intel_guc_submission_init_early(guc);

	mutex_init(&guc->send_mutex);
	spin_lock_init(&guc->irq_lock);
	if (GRAPHICS_VER(i915) >= 11) {
		guc->notify_reg = GEN11_GUC_HOST_INTERRUPT;
		guc->interrupts.reset = gen11_reset_guc_interrupts;
		guc->interrupts.enable = gen11_enable_guc_interrupts;
		guc->interrupts.disable = gen11_disable_guc_interrupts;
		guc->send_regs.base =
			i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
		guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
	} else {
		guc->notify_reg = GUC_SEND_INTERRUPT;
		guc->interrupts.reset = gen9_reset_guc_interrupts;
		guc->interrupts.enable = gen9_enable_guc_interrupts;
		guc->interrupts.disable = gen9_disable_guc_interrupts;
		guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
		guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
		BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
	}
}

static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
	u32 level = intel_guc_log_get_level(&guc->log);
	u32 flags = 0;

	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
		flags |= GUC_LOG_DISABLED;
	else
		flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
			 GUC_LOG_VERBOSITY_SHIFT;

	return flags;
}

static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	if (!intel_guc_submission_is_used(guc))
		flags |= GUC_CTL_DISABLE_SCHEDULER;

	return flags;
}

static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
{
	u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
	u32 flags;

	#if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
	#define UNIT SZ_1M
	#define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
	#else
	#define UNIT SZ_4K
	#define FLAG 0
	#endif

	BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!DEBUG_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(DEBUG_BUFFER_SIZE, UNIT));

	BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
	BUILD_BUG_ON((DEBUG_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_DEBUG_MASK >> GUC_LOG_DEBUG_SHIFT));

	flags = GUC_LOG_VALID |
		GUC_LOG_NOTIFY_ON_HALF_FULL |
		FLAG |
		((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
		((DEBUG_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DEBUG_SHIFT) |
		(offset << GUC_LOG_BUF_ADDR_SHIFT);

	#undef UNIT
	#undef FLAG

	return flags;
}
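
/*
 * Worked example (hypothetical sizes, for illustration only): if both buffers
 * were whole multiples of 1 MiB, say a 2 MiB crash buffer and an 8 MiB debug
 * buffer, UNIT would be SZ_1M and FLAG would be GUC_LOG_ALLOC_IN_MEGABYTE,
 * and the encoded fields would be 2M / 1M - 1 = 1 and 8M / 1M - 1 = 7, i.e.
 * each field carries "number of units minus one" for its buffer.
 */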

static u32 guc_ctl_ads_flags(struct intel_guc *guc)
{
	u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
	u32 flags = ads << GUC_ADS_ADDR_SHIFT;

	return flags;
}

/*
 * Initialise the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
static void guc_init_params(struct intel_guc *guc)
{
	u32 *params = guc->params;
	int i;

	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));

	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
}

/*
 * Write the GuC parameter block to the hardware before starting the firmware
 * transfer. These parameters are read by the firmware on startup and cannot
 * be changed thereafter.
 */
void intel_guc_write_params(struct intel_guc *guc)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	int i;

	/*
	 * All SOFT_SCRATCH registers are in FORCEWAKE_GT domain and
	 * they are power context saved so it's ok to release forcewake
	 * when we are done here and take it again at xfer time.
	 */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_GT);

	intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);

	intel_uncore_forcewake_put(uncore, FORCEWAKE_GT);
}

int intel_guc_init(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	int ret;

	ret = intel_uc_fw_init(&guc->fw);
	if (ret)
		goto out;

	ret = intel_guc_log_create(&guc->log);
	if (ret)
		goto err_fw;

	ret = intel_guc_ads_create(guc);
	if (ret)
		goto err_log;
	GEM_BUG_ON(!guc->ads_vma);

	ret = intel_guc_ct_init(&guc->ct);
	if (ret)
		goto err_ads;

	if (intel_guc_submission_is_used(guc)) {
		/*
		 * This is stuff we need to have available at fw load time
		 * if we are planning to enable submission later
		 */
		ret = intel_guc_submission_init(guc);
		if (ret)
			goto err_ct;
	}

	/* now that everything is perma-pinned, initialize the parameters */
	guc_init_params(guc);

	/* We need to notify the guc whenever we change the GGTT */
	i915_ggtt_enable_guc(gt->ggtt);

	intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_LOADABLE);

	return 0;

err_ct:
	intel_guc_ct_fini(&guc->ct);
err_ads:
	intel_guc_ads_destroy(guc);
err_log:
	intel_guc_log_destroy(&guc->log);
err_fw:
	intel_uc_fw_fini(&guc->fw);
out:
	i915_probe_error(gt->i915, "failed with %d\n", ret);
	return ret;
}

void intel_guc_fini(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	if (!intel_uc_fw_is_loadable(&guc->fw))
		return;

	i915_ggtt_disable_guc(gt->ggtt);

	if (intel_guc_submission_is_used(guc))
		intel_guc_submission_fini(guc);

	intel_guc_ct_fini(&guc->ct);

	intel_guc_ads_destroy(guc);
	intel_guc_log_destroy(&guc->log);
	intel_uc_fw_fini(&guc->fw);
}

/*
 * This function implements the MMIO based host to GuC interface.
 */
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *request, u32 len,
			u32 *response_buf, u32 response_buf_size)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	u32 header;
	int i;
	int ret;

	GEM_BUG_ON(!len);
	GEM_BUG_ON(len > guc->send_regs.count);

	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, request[0]) != GUC_HXG_ORIGIN_HOST);
	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, request[0]) != GUC_HXG_TYPE_REQUEST);

	mutex_lock(&guc->send_mutex);
	intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);

retry:
	for (i = 0; i < len; i++)
		intel_uncore_write(uncore, guc_send_reg(guc, i), request[i]);

	intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));

	intel_guc_notify(guc);

	/*
	 * No GuC command should ever take longer than 10ms.
	 * Fast commands should still complete in 10us.
	 */
	ret = __intel_wait_for_register_fw(uncore,
					   guc_send_reg(guc, 0),
					   GUC_HXG_MSG_0_ORIGIN,
					   FIELD_PREP(GUC_HXG_MSG_0_ORIGIN,
						      GUC_HXG_ORIGIN_GUC),
					   10, 10, &header);
	if (unlikely(ret)) {
timeout:
		drm_err(&i915->drm, "mmio request %#x: no reply %x\n",
			request[0], header);
		goto out;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_NO_RESPONSE_BUSY) {
#define done ({ header = intel_uncore_read(uncore, guc_send_reg(guc, 0)); \
		FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) != GUC_HXG_ORIGIN_GUC || \
		FIELD_GET(GUC_HXG_MSG_0_TYPE, header) != GUC_HXG_TYPE_NO_RESPONSE_BUSY; })

		ret = wait_for(done, 1000);
		if (unlikely(ret))
			goto timeout;
		if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
				       GUC_HXG_ORIGIN_GUC))
			goto proto;
#undef done
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
		u32 reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, header);

		drm_dbg(&i915->drm, "mmio request %#x: retrying, reason %u\n",
			request[0], reason);
		goto retry;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_RESPONSE_FAILURE) {
		u32 hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, header);
		u32 error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, header);

		drm_err(&i915->drm, "mmio request %#x: failure %x/%u\n",
			request[0], error, hint);
		ret = -ENXIO;
		goto out;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) != GUC_HXG_TYPE_RESPONSE_SUCCESS) {
proto:
		drm_err(&i915->drm, "mmio request %#x: unexpected reply %#x\n",
			request[0], header);
		ret = -EPROTO;
		goto out;
	}

	if (response_buf) {
		int count = min(response_buf_size, guc->send_regs.count);

		GEM_BUG_ON(!count);

		response_buf[0] = header;

		for (i = 1; i < count; i++)
			response_buf[i] = intel_uncore_read(uncore,
							    guc_send_reg(guc, i));

		/* Use number of copied dwords as our return value */
		ret = count;
	} else {
		/* Use data from the GuC response as our return value */
		ret = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, header);
	}

out:
	intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
	mutex_unlock(&guc->send_mutex);

	return ret;
}
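
/*
 * Illustrative sketch (not part of the upstream driver): a minimal caller of
 * intel_guc_send_mmio(), modelled on intel_guc_suspend() below. The action
 * code is only an example of an MMIO-capable request; passing a response
 * buffer makes the function return the number of dwords copied into it,
 * with the HXG reply header in response[0].
 */
static int __maybe_unused example_send_mmio(struct intel_guc *guc)
{
	u32 request[] = {
		INTEL_GUC_ACTION_RESET_CLIENT, /* example action only */
	};
	u32 response[GUC_MAX_MMIO_MSG_LEN];
	int ret;

	ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request),
				  response, ARRAY_SIZE(response));
	if (ret < 0)
		return ret;

	/* ret dwords were copied; response[0] holds the GuC reply header. */
	return 0;
}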

int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len)
{
	u32 msg;

	if (unlikely(!len))
		return -EPROTO;

	/* Make sure to handle only enabled messages */
	msg = payload[0] & guc->msg_enabled_mask;

	if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
		   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
		intel_guc_log_handle_flush_event(&guc->log);

	return 0;
}

/**
 * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
 * @guc: intel_guc structure
 * @rsa_offset: RSA offset w.r.t. GGTT base of the HuC vma
 *
 * Triggers a HuC firmware authentication request to the GuC via the
 * INTEL_GUC_ACTION_AUTHENTICATE_HUC action sent with intel_guc_send(). This
 * function is invoked by intel_huc_auth().
 *
 * Return:	non-zero code on error
 */
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
	u32 action[] = {
		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_offset
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @guc:	the guc
 */
int intel_guc_suspend(struct intel_guc *guc)
{
	int ret;
	u32 action[] = {
		INTEL_GUC_ACTION_RESET_CLIENT,
	};

	if (!intel_guc_is_ready(guc))
		return 0;

	if (intel_guc_submission_is_used(guc)) {
		/*
		 * This H2G MMIO command tears down the GuC in two steps. First it will
		 * generate a G2H CTB for every active context indicating a reset. In
		 * practice the i915 shouldn't ever get a G2H as suspend should only be
		 * called when the GPU is idle. Next, it tears down the CTBs and this
		 * H2G MMIO command completes.
		 *
		 * Don't abort on a failure code from the GuC. Keep going and do the
		 * clean up in sanitize() and re-initialisation on resume and hopefully
		 * the error here won't be problematic.
		 */
		ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
		if (ret)
			DRM_ERROR("GuC suspend: RESET_CLIENT action failed with error %d!\n", ret);
	}

	/* Signal that the GuC isn't running. */
	intel_guc_sanitize(guc);

	return 0;
}

/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @guc:	the guc
 */
int intel_guc_resume(struct intel_guc *guc)
{
	/*
	 * NB: This function can still be called even if GuC submission is
	 * disabled, e.g. if GuC is enabled for HuC authentication only. Thus,
	 * if any code is later added here, it must support doing nothing
	 * if submission is disabled (as per intel_guc_suspend).
	 */
	return 0;
}

/**
 * DOC: GuC Memory Management
 *
 * GuC can't allocate any memory for its own usage, so all the allocations must
 * be handled by the host driver. GuC accesses the memory via the GGTT, with the
 * exception of the top and bottom parts of the 4GB address space, which are
 * instead re-mapped by the GuC HW to the memory location of the FW itself (WOPCM)
 * or other parts of the HW. The driver must take care not to place objects that
 * the GuC is going to access in these reserved ranges. The layout of the GuC
 * address space is shown below:
 *
 * ::
 *
 *     +===========> +====================+ <== FFFF_FFFF
 *     ^             |      Reserved      |
 *     |             +====================+ <== GUC_GGTT_TOP
 *     |             |                    |
 *     |             |        DRAM        |
 *    GuC            |                    |
 *  Address    +===> +====================+ <== GuC ggtt_pin_bias
 *   Space     ^     |                    |
 *     |       |     |                    |
 *     |      GuC    |        GuC         |
 *     |     WOPCM   |       WOPCM        |
 *     |      Size   |                    |
 *     |       |     |                    |
 *     v       v     |                    |
 *     +=======+===> +====================+ <== 0000_0000
 *
 * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to GuC WOPCM
 * while the upper part of GuC Address Space [ggtt_pin_bias, GUC_GGTT_TOP) is
 * mapped to DRAM. The value of the GuC ggtt_pin_bias is the GuC WOPCM size.
 */

/**
 * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
 * @guc:	the guc
 * @size:	size of area to allocate (both virtual space and memory)
 *
 * This is a wrapper to create an object for use with the GuC. In order to
 * use it inside the GuC, an object needs to be pinned for its whole lifetime,
 * so we allocate both some backing storage and a range inside the Global GTT.
 * We must pin it in the GGTT somewhere other than [0, GUC ggtt_pin_bias)
 * because that range is reserved inside GuC.
 *
 * Return:	An i915_vma if successful, otherwise an ERR_PTR.
 */
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u64 flags;
	int ret;

	obj = i915_gem_object_create_shmem(gt->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma))
		goto err;

	flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
	ret = i915_ggtt_pin(vma, NULL, 0, flags);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	return i915_vma_make_unshrinkable(vma);

err:
	i915_gem_object_put(obj);
	return vma;
}

/**
 * intel_guc_allocate_and_map_vma() - Allocate and map VMA for GuC usage
 * @guc:	the guc
 * @size:	size of area to allocate (both virtual space and memory)
 * @out_vma:	return variable for the allocated vma pointer
 * @out_vaddr:	return variable for the obj mapping
 *
 * This wrapper calls intel_guc_allocate_vma() and then maps the allocated
 * object with I915_MAP_WB.
 *
 * Return:	0 if successful, a negative errno code otherwise.
 */
int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
				   struct i915_vma **out_vma, void **out_vaddr)
{
	struct i915_vma *vma;
	void *vaddr;

	vma = intel_guc_allocate_vma(guc, size);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
						 i915_coherent_map_type(guc_to_gt(guc)->i915,
									vma->obj, true));
	if (IS_ERR(vaddr)) {
		i915_vma_unpin_and_release(&vma, 0);
		return PTR_ERR(vaddr);
	}

	*out_vma = vma;
	*out_vaddr = vaddr;

	return 0;
}
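
/*
 * Illustrative sketch (not part of the upstream driver): typical usage of the
 * helper above, with a hypothetical one-page buffer. Callers are expected to
 * release the pin and the mapping together with
 * i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP).
 */
static int __maybe_unused example_guc_buffer(struct intel_guc *guc)
{
	struct i915_vma *vma;
	void *vaddr;
	int ret;

	ret = intel_guc_allocate_and_map_vma(guc, PAGE_SIZE, &vma, &vaddr);
	if (ret)
		return ret;

	memset(vaddr, 0, PAGE_SIZE); /* CPU access through the returned mapping */

	i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
	return 0;
}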

/**
 * intel_guc_load_status - dump information about GuC load status
 * @guc: the GuC
 * @p: the &drm_printer
 *
 * Pretty printer for GuC load status.
 */
void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct intel_uncore *uncore = gt->uncore;
	intel_wakeref_t wakeref;

	if (!intel_guc_is_supported(guc)) {
		drm_printf(p, "GuC not supported\n");
		return;
	}

	if (!intel_guc_is_wanted(guc)) {
		drm_printf(p, "GuC disabled\n");
		return;
	}

	intel_uc_fw_dump(&guc->fw, p);

	with_intel_runtime_pm(uncore->rpm, wakeref) {
		u32 status = intel_uncore_read(uncore, GUC_STATUS);
		u32 i;

		drm_printf(p, "\nGuC status 0x%08x:\n", status);
		drm_printf(p, "\tBootrom status = 0x%x\n",
			   (status & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
		drm_printf(p, "\tuKernel status = 0x%x\n",
			   (status & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
		drm_printf(p, "\tMIA Core status = 0x%x\n",
			   (status & GS_MIA_MASK) >> GS_MIA_SHIFT);
		drm_puts(p, "\nScratch registers:\n");
		for (i = 0; i < 16; i++) {
			drm_printf(p, "\t%2d: \t0x%x\n",
				   i, intel_uncore_read(uncore, SOFT_SCRATCH(i)));
		}
	}
}