// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#include "gem/i915_gem_lmem.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "intel_guc.h"
#include "intel_guc_slpc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"
#include "i915_irq.h"

/**
 * DOC: GuC
 *
 * The GuC is a microcontroller inside the GT HW, introduced in gen9. The GuC is
 * designed to offload some of the functionality usually performed by the host
 * driver; currently the main operations it can take care of are:
 *
 * - Authentication of the HuC, which is required to fully enable HuC usage.
 * - Low latency graphics context scheduling (a.k.a. GuC submission).
 * - GT Power management.
 *
 * The enable_guc module parameter can be used to select which of those
 * operations to enable within GuC. Note that not all the operations are
 * supported on all gen9+ platforms.
 *
 * Enabling the GuC is not mandatory and therefore the firmware is only loaded
 * if at least one of the operations is selected. However, not loading the GuC
 * might result in the loss of some features that do require the GuC (currently
 * just the HuC, but more are expected to land in the future).
 */
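
/*
 * Illustrative example, not taken from this file: assuming the standard
 * i915 enable_guc bitmask semantics (bit 0 requests GuC submission, bit 1
 * requests HuC loading/authentication), the mix of operations is chosen
 * at module load time:
 *
 * ::
 *
 *     modprobe i915 enable_guc=2    # load GuC only to authenticate the HuC
 *     modprobe i915 enable_guc=3    # GuC submission plus HuC authentication
 */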

void intel_guc_notify(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	/*
	 * On Gen11+, the value written to the register is passed as a payload
	 * to the FW. However, the FW currently treats all values the same way
	 * (H2G interrupt), so we can just write the value that the HW expects
	 * on older gens.
	 */
	intel_uncore_write(gt->uncore, guc->notify_reg, GUC_SEND_TRIGGER);
}

static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);
	GEM_BUG_ON(i >= guc->send_regs.count);

	return _MMIO(guc->send_regs.base + 4 * i);
}

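/*
 * Cache the union of forcewake domains covering all of the GuC send
 * registers, so that intel_guc_send_mmio() can take and release them
 * with a single forcewake get/put around the whole transaction.
 */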
void intel_guc_init_send_regs(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	enum forcewake_domains fw_domains = 0;
	unsigned int i;

	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);

	for (i = 0; i < guc->send_regs.count; i++) {
		fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore,
					guc_send_reg(guc, i),
					FW_REG_READ | FW_REG_WRITE);
	}
	guc->send_regs.fw_domains = fw_domains;
}

static void gen9_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
	spin_unlock_irq(&gt->irq_lock);
}

static void gen9_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);
	WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
		     gt->pm_guc_events);
	gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
	spin_unlock_irq(&gt->irq_lock);
}

static void gen9_disable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);

	gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);

	spin_unlock_irq(&gt->irq_lock);
	intel_synchronize_irq(gt->i915);

	gen9_reset_guc_interrupts(guc);
}

static void gen11_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);
	gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
	spin_unlock_irq(&gt->irq_lock);
}

static void gen11_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);

	spin_lock_irq(&gt->irq_lock);
	WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
	intel_uncore_write(gt->uncore,
			   GEN11_GUC_SG_INTR_ENABLE, events);
	intel_uncore_write(gt->uncore,
			   GEN11_GUC_SG_INTR_MASK, ~events);
	spin_unlock_irq(&gt->irq_lock);
}

static void gen11_disable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);

	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);

	spin_unlock_irq(&gt->irq_lock);
	intel_synchronize_irq(gt->i915);

	gen11_reset_guc_interrupts(guc);
}

void intel_guc_init_early(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC);
	intel_guc_ct_init_early(&guc->ct);
	intel_guc_log_init_early(&guc->log);
	intel_guc_submission_init_early(guc);
	intel_guc_slpc_init_early(&guc->slpc);
	intel_guc_rc_init_early(guc);

	mutex_init(&guc->send_mutex);
	spin_lock_init(&guc->irq_lock);
	if (GRAPHICS_VER(i915) >= 11) {
		guc->notify_reg = GEN11_GUC_HOST_INTERRUPT;
		guc->interrupts.reset = gen11_reset_guc_interrupts;
		guc->interrupts.enable = gen11_enable_guc_interrupts;
		guc->interrupts.disable = gen11_disable_guc_interrupts;
		guc->send_regs.base =
			i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
		guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;

	} else {
		guc->notify_reg = GUC_SEND_INTERRUPT;
		guc->interrupts.reset = gen9_reset_guc_interrupts;
		guc->interrupts.enable = gen9_enable_guc_interrupts;
		guc->interrupts.disable = gen9_disable_guc_interrupts;
		guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
		guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
		BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
	}
}

void intel_guc_init_late(struct intel_guc *guc)
{
	intel_guc_ads_init_late(guc);
}

static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
	u32 level = intel_guc_log_get_level(&guc->log);
	u32 flags = 0;

	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
		flags |= GUC_LOG_DISABLED;
	else
		flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
			 GUC_LOG_VERBOSITY_SHIFT;

	return flags;
}

static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	if (!intel_guc_submission_is_used(guc))
		flags |= GUC_CTL_DISABLE_SCHEDULER;

	if (intel_guc_slpc_is_used(guc))
		flags |= GUC_CTL_ENABLE_SLPC;

	return flags;
}

static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
{
	u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
	u32 flags;

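	/*
	 * Buffer sizes are handed to the GuC in either 1MiB or 4KiB units:
	 * prefer MiB granularity (advertised via GUC_LOG_ALLOC_IN_MEGABYTE)
	 * whenever the configured sizes allow it, otherwise fall back to
	 * 4KiB pages.
	 */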
	#if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
	#define UNIT SZ_1M
	#define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
	#else
	#define UNIT SZ_4K
	#define FLAG 0
	#endif

	BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!DEBUG_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(DEBUG_BUFFER_SIZE, UNIT));

	BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
	BUILD_BUG_ON((DEBUG_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_DEBUG_MASK >> GUC_LOG_DEBUG_SHIFT));

	flags = GUC_LOG_VALID |
		GUC_LOG_NOTIFY_ON_HALF_FULL |
		FLAG |
		((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
		((DEBUG_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DEBUG_SHIFT) |
		(offset << GUC_LOG_BUF_ADDR_SHIFT);

	#undef UNIT
	#undef FLAG

	return flags;
}

static u32 guc_ctl_ads_flags(struct intel_guc *guc)
{
	u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
	u32 flags = ads << GUC_ADS_ADDR_SHIFT;

	return flags;
}

/*
 * Initialise the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
static void guc_init_params(struct intel_guc *guc)
{
	u32 *params = guc->params;
	int i;

	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));

	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
}

/*
 * Write the GuC parameter block out to the SOFT_SCRATCH registers
 * before starting the firmware transfer. These parameters are read
 * by the firmware on startup and cannot be changed thereafter.
 */
void intel_guc_write_params(struct intel_guc *guc)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	int i;

	/*
	 * All SOFT_SCRATCH registers are in FORCEWAKE_GT domain and
	 * they are power context saved so it's ok to release forcewake
	 * when we are done here and take it again at xfer time.
	 */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_GT);

	intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);

	intel_uncore_forcewake_put(uncore, FORCEWAKE_GT);
}

int intel_guc_init(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	int ret;

	ret = intel_uc_fw_init(&guc->fw);
	if (ret)
		goto out;

	ret = intel_guc_log_create(&guc->log);
	if (ret)
		goto err_fw;

	ret = intel_guc_ads_create(guc);
	if (ret)
		goto err_log;
	GEM_BUG_ON(!guc->ads_vma);

	ret = intel_guc_ct_init(&guc->ct);
	if (ret)
		goto err_ads;

	if (intel_guc_submission_is_used(guc)) {
		/*
		 * This is stuff we need to have available at fw load time
		 * if we are planning to enable submission later
		 */
		ret = intel_guc_submission_init(guc);
		if (ret)
			goto err_ct;
	}

	if (intel_guc_slpc_is_used(guc)) {
		ret = intel_guc_slpc_init(&guc->slpc);
		if (ret)
			goto err_submission;
	}

	/* now that everything is perma-pinned, initialize the parameters */
	guc_init_params(guc);

	/* We need to notify the guc whenever we change the GGTT */
	i915_ggtt_enable_guc(gt->ggtt);

	intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_LOADABLE);

	return 0;

err_submission:
	intel_guc_submission_fini(guc);
err_ct:
	intel_guc_ct_fini(&guc->ct);
err_ads:
	intel_guc_ads_destroy(guc);
err_log:
	intel_guc_log_destroy(&guc->log);
err_fw:
	intel_uc_fw_fini(&guc->fw);
out:
	i915_probe_error(gt->i915, "failed with %d\n", ret);
	return ret;
}

void intel_guc_fini(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	if (!intel_uc_fw_is_loadable(&guc->fw))
		return;

	i915_ggtt_disable_guc(gt->ggtt);

	if (intel_guc_slpc_is_used(guc))
		intel_guc_slpc_fini(&guc->slpc);

	if (intel_guc_submission_is_used(guc))
		intel_guc_submission_fini(guc);

	intel_guc_ct_fini(&guc->ct);

	intel_guc_ads_destroy(guc);
	intel_guc_log_destroy(&guc->log);
	intel_uc_fw_fini(&guc->fw);
}

/*
 * This function implements the MMIO based host to GuC interface.
 */
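/*
 * The flow below: the request is written to the send registers and the GuC
 * is notified via intel_guc_notify(); scratch reg 0 is then polled until it
 * holds a GuC-originated HXG header. A BUSY reply is waited out, a RETRY
 * reply resends the request, a FAILURE or malformed reply maps to -ENXIO or
 * -EPROTO, and on SUCCESS the reply is either copied into response_buf or
 * the header's DATA0 field becomes the return value.
 */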
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *request, u32 len,
			u32 *response_buf, u32 response_buf_size)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	u32 header;
	int i;
	int ret;

	GEM_BUG_ON(!len);
	GEM_BUG_ON(len > guc->send_regs.count);

	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, request[0]) != GUC_HXG_ORIGIN_HOST);
	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, request[0]) != GUC_HXG_TYPE_REQUEST);

	mutex_lock(&guc->send_mutex);
	intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);

retry:
	for (i = 0; i < len; i++)
		intel_uncore_write(uncore, guc_send_reg(guc, i), request[i]);

	intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));

	intel_guc_notify(guc);

	/*
	 * No GuC command should ever take longer than 10ms.
	 * Fast commands should still complete in 10us.
	 */
	ret = __intel_wait_for_register_fw(uncore,
					   guc_send_reg(guc, 0),
					   GUC_HXG_MSG_0_ORIGIN,
					   FIELD_PREP(GUC_HXG_MSG_0_ORIGIN,
						      GUC_HXG_ORIGIN_GUC),
					   10, 10, &header);
	if (unlikely(ret)) {
timeout:
		drm_err(&i915->drm, "mmio request %#x: no reply %x\n",
			request[0], header);
		goto out;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_NO_RESPONSE_BUSY) {
#define done ({ header = intel_uncore_read(uncore, guc_send_reg(guc, 0)); \
		FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) != GUC_HXG_ORIGIN_GUC || \
		FIELD_GET(GUC_HXG_MSG_0_TYPE, header) != GUC_HXG_TYPE_NO_RESPONSE_BUSY; })

		ret = wait_for(done, 1000);
		if (unlikely(ret))
			goto timeout;
		if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
				       GUC_HXG_ORIGIN_GUC))
			goto proto;
#undef done
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
		u32 reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, header);

		drm_dbg(&i915->drm, "mmio request %#x: retrying, reason %u\n",
			request[0], reason);
		goto retry;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_RESPONSE_FAILURE) {
		u32 hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, header);
		u32 error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, header);

		drm_err(&i915->drm, "mmio request %#x: failure %x/%u\n",
			request[0], error, hint);
		ret = -ENXIO;
		goto out;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) != GUC_HXG_TYPE_RESPONSE_SUCCESS) {
proto:
		drm_err(&i915->drm, "mmio request %#x: unexpected reply %#x\n",
			request[0], header);
		ret = -EPROTO;
		goto out;
	}

	if (response_buf) {
		int count = min(response_buf_size, guc->send_regs.count);

		GEM_BUG_ON(!count);

		response_buf[0] = header;

		for (i = 1; i < count; i++)
			response_buf[i] = intel_uncore_read(uncore,
							    guc_send_reg(guc, i));

		/* Use number of copied dwords as our return value */
		ret = count;
	} else {
		/* Use data from the GuC response as our return value */
		ret = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, header);
	}

out:
	intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
	mutex_unlock(&guc->send_mutex);

	return ret;
}

int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len)
{
	u32 msg;

	if (unlikely(!len))
		return -EPROTO;

	/* Make sure to handle only enabled messages */
	msg = payload[0] & guc->msg_enabled_mask;

	if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
		   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
		intel_guc_log_handle_flush_event(&guc->log);

	return 0;
}

/**
 * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
 * @guc: intel_guc structure
 * @rsa_offset: rsa offset w.r.t. ggtt base of huc vma
 *
 * Triggers a HuC firmware authentication request to the GuC via intel_guc_send
 * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function is invoked by
 * intel_huc_auth().
 *
 * Return:	non-zero code on error
 */
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
	u32 action[] = {
		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_offset
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @guc:	the guc
 */
int intel_guc_suspend(struct intel_guc *guc)
{
	int ret;
	u32 action[] = {
		INTEL_GUC_ACTION_RESET_CLIENT,
	};

	if (!intel_guc_is_ready(guc))
		return 0;

	if (intel_guc_submission_is_used(guc)) {
		/*
		 * This H2G MMIO command tears down the GuC in two steps. First it will
		 * generate a G2H CTB for every active context indicating a reset. In
		 * practice the i915 shouldn't ever get a G2H as suspend should only be
		 * called when the GPU is idle. Next, it tears down the CTBs and this
		 * H2G MMIO command completes.
		 *
		 * Don't abort on a failure code from the GuC. Keep going and do the
		 * clean up in sanitize() and re-initialisation on resume, and hopefully
		 * the error here won't be problematic.
		 */
		ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
		if (ret)
			DRM_ERROR("GuC suspend: RESET_CLIENT action failed with error %d!\n", ret);
	}

	/* Signal that the GuC isn't running. */
	intel_guc_sanitize(guc);

	return 0;
}

/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @guc:	the guc
 */
int intel_guc_resume(struct intel_guc *guc)
{
	/*
	 * NB: This function can still be called even if GuC submission is
	 * disabled, e.g. if GuC is enabled for HuC authentication only. Thus,
	 * if any code is later added here, it must support doing nothing
	 * if submission is disabled (as per intel_guc_suspend).
	 */
	return 0;
}

/**
 * DOC: GuC Memory Management
 *
 * GuC can't allocate any memory for its own usage, so all the allocations must
 * be handled by the host driver. GuC accesses the memory via the GGTT, with the
 * exception of the top and bottom parts of the 4GB address space, which are
 * instead re-mapped by the GuC HW to the memory location of the FW itself (WOPCM)
 * or other parts of the HW. The driver must take care not to place objects that
 * the GuC is going to access in these reserved ranges. The layout of the GuC
 * address space is shown below:
 *
 * ::
 *
 *     +===========> +====================+ <== FFFF_FFFF
 *     ^             |      Reserved      |
 *     |             +====================+ <== GUC_GGTT_TOP
 *     |             |                    |
 *     |             |        DRAM        |
 *    GuC            |                    |
 *  Address    +===> +====================+ <== GuC ggtt_pin_bias
 *   Space     ^     |                    |
 *     |       |     |                    |
 *     |      GuC    |        GuC         |
 *     |     WOPCM   |       WOPCM        |
 *     |      Size   |                    |
 *     |       |     |                    |
 *     v       v     |                    |
 *     +=======+===> +====================+ <== 0000_0000
 *
 * The lower part of the GuC Address Space [0, ggtt_pin_bias) is mapped to the
 * GuC WOPCM, while the upper part [ggtt_pin_bias, GUC_GGTT_TOP) is mapped to
 * DRAM. The value of the GuC ggtt_pin_bias is the GuC WOPCM size.
 */
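
/*
 * A minimal sketch of respecting the bias, mirroring what
 * intel_guc_allocate_vma() below does when pinning an object for GuC use:
 *
 * ::
 *
 *     u64 flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
 *
 *     ret = i915_ggtt_pin(vma, NULL, 0, flags);
 */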

/**
 * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
 * @guc:	the guc
 * @size:	size of area to allocate (both virtual space and memory)
 *
 * This is a wrapper to create an object for use with the GuC. In order to
 * use it inside the GuC, an object needs to be pinned for its lifetime, so we
 * allocate both some backing storage and a range inside the Global GTT. We
 * must pin it in the GGTT somewhere other than [0, GUC ggtt_pin_bias) because
 * that range is reserved inside GuC.
 *
 * Return:	A i915_vma if successful, otherwise an ERR_PTR.
 */
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u64 flags;
	int ret;

	if (HAS_LMEM(gt->i915))
		obj = i915_gem_object_create_lmem(gt->i915, size,
						  I915_BO_ALLOC_CPU_CLEAR |
						  I915_BO_ALLOC_CONTIGUOUS |
						  I915_BO_ALLOC_PM_EARLY);
	else
		obj = i915_gem_object_create_shmem(gt->i915, size);

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma))
		goto err;

	flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
	ret = i915_ggtt_pin(vma, NULL, 0, flags);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	return i915_vma_make_unshrinkable(vma);

err:
	i915_gem_object_put(obj);
	return vma;
}

/**
 * intel_guc_allocate_and_map_vma() - Allocate and map VMA for GuC usage
 * @guc:	the guc
 * @size:	size of area to allocate (both virtual space and memory)
 * @out_vma:	return variable for the allocated vma pointer
 * @out_vaddr:	return variable for the obj mapping
 *
 * This wrapper calls intel_guc_allocate_vma() and then maps the allocated
 * object with I915_MAP_WB.
 *
 * Return:	0 if successful, a negative errno code otherwise.
 */
int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
				   struct i915_vma **out_vma, void **out_vaddr)
{
	struct i915_vma *vma;
	void *vaddr;

	vma = intel_guc_allocate_vma(guc, size);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
						 i915_coherent_map_type(guc_to_gt(guc)->i915,
									vma->obj, true));
	if (IS_ERR(vaddr)) {
		i915_vma_unpin_and_release(&vma, 0);
		return PTR_ERR(vaddr);
	}

	*out_vma = vma;
	*out_vaddr = vaddr;

	return 0;
}
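
/*
 * Typical usage, sketched (callers elsewhere in the driver follow this
 * pattern; the SZ_4K size here is illustrative):
 *
 * ::
 *
 *     struct i915_vma *vma;
 *     void *vaddr;
 *     int err;
 *
 *     err = intel_guc_allocate_and_map_vma(guc, SZ_4K, &vma, &vaddr);
 *     if (err)
 *             return err;
 */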

/**
 * intel_guc_load_status - dump information about GuC load status
 * @guc: the GuC
 * @p: the &drm_printer
 *
 * Pretty printer for GuC load status.
 */
void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct intel_uncore *uncore = gt->uncore;
	intel_wakeref_t wakeref;

	if (!intel_guc_is_supported(guc)) {
		drm_printf(p, "GuC not supported\n");
		return;
	}

	if (!intel_guc_is_wanted(guc)) {
		drm_printf(p, "GuC disabled\n");
		return;
	}

	intel_uc_fw_dump(&guc->fw, p);

	with_intel_runtime_pm(uncore->rpm, wakeref) {
		u32 status = intel_uncore_read(uncore, GUC_STATUS);
		u32 i;

		drm_printf(p, "\nGuC status 0x%08x:\n", status);
		drm_printf(p, "\tBootrom status = 0x%x\n",
			   (status & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
		drm_printf(p, "\tuKernel status = 0x%x\n",
			   (status & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
		drm_printf(p, "\tMIA Core status = 0x%x\n",
			   (status & GS_MIA_MASK) >> GS_MIA_SHIFT);
		drm_puts(p, "\nScratch registers:\n");
		for (i = 0; i < 16; i++) {
			drm_printf(p, "\t%2d: \t0x%x\n",
				   i, intel_uncore_read(uncore, SOFT_SCRATCH(i)));
		}
	}
}

void intel_guc_write_barrier(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	if (i915_gem_object_is_lmem(guc->ct.vma->obj)) {
		/*
		 * Ensure intel_uncore_write_fw can be used rather than
		 * intel_uncore_write.
		 */
		GEM_BUG_ON(guc->send_regs.fw_domains);

		/*
		 * This register is used by the i915 and GuC for MMIO based
		 * communication. Once we are in this code CTBs are the only
		 * method the i915 uses to communicate with the GuC so it is
		 * safe to write to this register (a value of 0 is NOP for MMIO
		 * communication). If we ever start mixing CTBs and MMIOs a new
		 * register will have to be chosen. This function is also used
		 * to enforce ordering of a work queue item write and an update
		 * to the process descriptor. When a work queue is being used,
		 * CTBs are also the only mechanism of communication.
		 */
		intel_uncore_write_fw(gt->uncore, GEN11_SOFT_SCRATCH(0), 0);
	} else {
		/* wmb() sufficient for a barrier if in smem */
		wmb();
	}
}