/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "gt/intel_reset.h"
#include "intel_uc.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"

static void guc_free_load_err_log(struct intel_guc *guc);

/* Reset GuC providing us with fresh state for both GuC and HuC. */
static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 guc_status;

	ret = intel_reset_guc(&dev_priv->gt);
	if (ret) {
		DRM_ERROR("Failed to reset GuC, ret = %d\n", ret);
		return ret;
	}

	guc_status = I915_READ(GUC_STATUS);
	WARN(!(guc_status & GS_MIA_IN_RESET),
	     "GuC status: 0x%x, MIA core expected to be in reset\n",
	     guc_status);

	return ret;
}

static int __get_platform_enable_guc(struct drm_i915_private *i915)
{
	struct intel_uc_fw *guc_fw = &i915->guc.fw;
	struct intel_uc_fw *huc_fw = &i915->huc.fw;
	int enable_guc = 0;

	if (!HAS_GUC(i915))
		return 0;

	/* We don't want to enable GuC/HuC on pre-Gen11 by default */
	if (INTEL_GEN(i915) < 11)
		return 0;

	if (intel_uc_fw_is_selected(guc_fw) && intel_uc_fw_is_selected(huc_fw))
		enable_guc |= ENABLE_GUC_LOAD_HUC;

	return enable_guc;
}

static int __get_default_guc_log_level(struct drm_i915_private *i915)
{
	int guc_log_level;

	if (!intel_uc_fw_supported(&i915->guc.fw) ||
	    !intel_uc_is_using_guc(i915))
		guc_log_level = GUC_LOG_LEVEL_DISABLED;
	else if (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
		 IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		guc_log_level = GUC_LOG_LEVEL_MAX;
	else
		guc_log_level = GUC_LOG_LEVEL_NON_VERBOSE;

	/* Any platform specific fine-tuning can be done here */

	return guc_log_level;
}

/**
 * sanitize_options_early - sanitize uC related modparam options
 * @i915: device private
 *
 * In case of the "enable_guc" option, this function will modify it only if
 * it was initially set to "auto(-1)". The default value for this modparam
 * varies between platforms and is hardcoded in the driver code. Any other
 * modparam value is only checked against the availability of the related
 * hardware or firmware definitions.
 *
 * In case of the "guc_log_level" option, this function will modify it only
 * if it was initially set to "auto(-1)" or if the initial value was
 * "enable(1..4)" on platforms without the GuC. The default value for this
 * modparam varies between platforms and is usually "disable(0)", unless the
 * GuC is enabled on the given platform and the driver is compiled with a
 * debug config, in which case it defaults to "enable(1..4)".
 */
static void sanitize_options_early(struct drm_i915_private *i915)
{
	struct intel_uc_fw *guc_fw = &i915->guc.fw;
	struct intel_uc_fw *huc_fw = &i915->huc.fw;

	/* A negative value means "use platform default" */
	if (i915_modparams.enable_guc < 0)
		i915_modparams.enable_guc = __get_platform_enable_guc(i915);

	DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n",
			 i915_modparams.enable_guc,
			 yesno(intel_uc_is_using_guc_submission(i915)),
			 yesno(intel_uc_is_using_huc(i915)));

	/* Verify GuC firmware availability */
	if (intel_uc_is_using_guc(i915) && !intel_uc_fw_is_selected(guc_fw)) {
		DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
			 "enable_guc", i915_modparams.enable_guc,
			 !intel_uc_fw_supported(guc_fw) ?
				"no GuC hardware" : "no GuC firmware");
	}

	/* Verify HuC firmware availability */
	if (intel_uc_is_using_huc(i915) && !intel_uc_fw_is_selected(huc_fw)) {
		DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
			 "enable_guc", i915_modparams.enable_guc,
			 !intel_uc_fw_supported(huc_fw) ?
				"no HuC hardware" : "no HuC firmware");
	}

	/* XXX: GuC submission is unavailable for now */
	if (intel_uc_is_using_guc_submission(i915)) {
		DRM_INFO("Incompatible option detected: %s=%d, %s!\n",
			 "enable_guc", i915_modparams.enable_guc,
			 "GuC submission not supported");
		DRM_INFO("Switching to non-GuC submission mode!\n");
		i915_modparams.enable_guc &= ~ENABLE_GUC_SUBMISSION;
	}

	/* A negative value means "use platform/config default" */
	if (i915_modparams.guc_log_level < 0)
		i915_modparams.guc_log_level =
			__get_default_guc_log_level(i915);

	if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc(i915)) {
		DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
			 "guc_log_level", i915_modparams.guc_log_level,
			 !intel_uc_fw_supported(guc_fw) ?
				"no GuC hardware" : "GuC not enabled");
		i915_modparams.guc_log_level = 0;
	}

	if (i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX) {
		DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
			 "guc_log_level", i915_modparams.guc_log_level,
			 "verbosity too high");
		i915_modparams.guc_log_level = GUC_LOG_LEVEL_MAX;
	}

	DRM_DEBUG_DRIVER("guc_log_level=%d (enabled:%s, verbose:%s, verbosity:%d)\n",
			 i915_modparams.guc_log_level,
			 yesno(i915_modparams.guc_log_level),
			 yesno(GUC_LOG_LEVEL_IS_VERBOSE(i915_modparams.guc_log_level)),
			 GUC_LOG_LEVEL_TO_VERBOSITY(i915_modparams.guc_log_level));

	/* Make sure that sanitization was done */
	GEM_BUG_ON(i915_modparams.enable_guc < 0);
	GEM_BUG_ON(i915_modparams.guc_log_level < 0);
}

void intel_uc_init_early(struct drm_i915_private *i915)
{
	struct intel_guc *guc = &i915->guc;
	struct intel_huc *huc = &i915->huc;

	intel_guc_init_early(guc);
	intel_huc_init_early(huc);

	sanitize_options_early(i915);
}

void intel_uc_cleanup_early(struct drm_i915_private *i915)
{
	struct intel_guc *guc = &i915->guc;

	guc_free_load_err_log(guc);
}

/**
 * intel_uc_init_mmio - setup uC MMIO access
 * @i915: device private
 *
 * Set up the minimal state necessary for MMIO accesses later in the
 * initialization sequence.
 */
void intel_uc_init_mmio(struct drm_i915_private *i915)
{
	intel_guc_init_send_regs(&i915->guc);
}

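/*
 * Preserve the GuC log buffer after a failed firmware load (only when GuC
 * logging is enabled) so the error log remains available for later inspection.
 */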
static void guc_capture_load_err_log(struct intel_guc *guc)
{
	if (!guc->log.vma || !intel_guc_log_get_level(&guc->log))
		return;

	if (!guc->load_err_log)
		guc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
}

static void guc_free_load_err_log(struct intel_guc *guc)
{
	if (guc->load_err_log)
		i915_gem_object_put(guc->load_err_log);
}

/*
 * Events triggered while CT buffers are disabled are logged in the SCRATCH_15
 * register using the same bits used in the CT message payload. Since our
 * communication channel with guc is turned off at this point, we can save the
 * message and handle it after we turn it back on.
 */
static void guc_clear_mmio_msg(struct intel_guc *guc)
{
	intel_uncore_write(&guc_to_i915(guc)->uncore, SOFT_SCRATCH(15), 0);
}

static void guc_get_mmio_msg(struct intel_guc *guc)
{
	u32 val;

	spin_lock_irq(&guc->irq_lock);

	val = intel_uncore_read(&guc_to_i915(guc)->uncore, SOFT_SCRATCH(15));
	guc->mmio_msg |= val & guc->msg_enabled_mask;

	/*
	 * clear all events, including the ones we're not currently servicing,
	 * to make sure we don't try to process a stale message if we enable
	 * handling of more events later.
	 */
	guc_clear_mmio_msg(guc);

	spin_unlock_irq(&guc->irq_lock);
}

static void guc_handle_mmio_msg(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_i915(guc);

	/* we need communication to be enabled to reply to GuC */
	GEM_BUG_ON(guc->handler == intel_guc_to_host_event_handler_nop);

	if (!guc->mmio_msg)
		return;

	spin_lock_irq(&i915->irq_lock);
	intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
	spin_unlock_irq(&i915->irq_lock);

	guc->mmio_msg = 0;
}

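/* Thin wrappers around the per-platform GuC interrupt callbacks. */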
static void guc_reset_interrupts(struct intel_guc *guc)
{
	guc->interrupts.reset(guc_to_i915(guc));
}

static void guc_enable_interrupts(struct intel_guc *guc)
{
	guc->interrupts.enable(guc_to_i915(guc));
}

static void guc_disable_interrupts(struct intel_guc *guc)
{
	guc->interrupts.disable(guc_to_i915(guc));
}

static int guc_enable_communication(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_i915(guc);
	int ret;

	ret = intel_guc_ct_enable(&guc->ct);
	if (ret)
		return ret;

	guc->send = intel_guc_send_ct;
	guc->handler = intel_guc_to_host_event_handler_ct;

	/* check for mmio messages received before/during the CT enable */
	guc_get_mmio_msg(guc);
	guc_handle_mmio_msg(guc);

	guc_enable_interrupts(guc);

	/* check for CT messages received before we enabled interrupts */
	spin_lock_irq(&i915->irq_lock);
	intel_guc_to_host_event_handler_ct(guc);
	spin_unlock_irq(&i915->irq_lock);

	DRM_INFO("GuC communication enabled\n");

	return 0;
}

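/*
 * Stop using the CT channel and fall back to the nop send/handler vfuncs,
 * clearing any stale mmio message; used from intel_uc_reset_prepare() ahead
 * of a full GPU reset.
 */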
static void guc_stop_communication(struct intel_guc *guc)
{
	intel_guc_ct_stop(&guc->ct);

	guc->send = intel_guc_send_nop;
	guc->handler = intel_guc_to_host_event_handler_nop;

	guc_clear_mmio_msg(guc);
}

static void guc_disable_communication(struct intel_guc *guc)
{
	/*
	 * Events generated during or after CT disable are logged by GuC
	 * via mmio. Make sure the register is clear before disabling CT
	 * since all events we cared about have already been processed via CT.
	 */
	guc_clear_mmio_msg(guc);

	guc_disable_interrupts(guc);

	guc->send = intel_guc_send_nop;
	guc->handler = intel_guc_to_host_event_handler_nop;

	intel_guc_ct_disable(&guc->ct);

	/*
	 * Check for messages received during/after the CT disable. We do not
	 * expect any messages to have arrived via CT between the interrupt
	 * disable and the CT disable because GuC should've been idle until we
	 * triggered the CT disable protocol.
	 */
	guc_get_mmio_msg(guc);

	DRM_INFO("GuC communication disabled\n");
}

void intel_uc_fetch_firmwares(struct drm_i915_private *i915)
{
	if (!USES_GUC(i915))
		return;

	intel_uc_fw_fetch(i915, &i915->guc.fw);

	if (USES_HUC(i915))
		intel_uc_fw_fetch(i915, &i915->huc.fw);
}

void intel_uc_cleanup_firmwares(struct drm_i915_private *i915)
{
	if (!USES_GUC(i915))
		return;

	if (USES_HUC(i915))
		intel_uc_fw_cleanup_fetch(&i915->huc.fw);

	intel_uc_fw_cleanup_fetch(&i915->guc.fw);
}

int intel_uc_init(struct drm_i915_private *i915)
{
	struct intel_guc *guc = &i915->guc;
	struct intel_huc *huc = &i915->huc;
	int ret;

	if (!USES_GUC(i915))
		return 0;

	if (!intel_uc_fw_supported(&guc->fw))
		return -ENODEV;

	/* XXX: GuC submission is unavailable for now */
	GEM_BUG_ON(USES_GUC_SUBMISSION(i915));

	ret = intel_guc_init(guc);
	if (ret)
		return ret;

	if (USES_HUC(i915)) {
		ret = intel_huc_init(huc);
		if (ret)
			goto err_guc;
	}

	if (USES_GUC_SUBMISSION(i915)) {
		/*
		 * This is stuff we need to have available at fw load time
		 * if we are planning to enable submission later
		 */
		ret = intel_guc_submission_init(guc);
		if (ret)
			goto err_huc;
	}

	return 0;

err_huc:
	if (USES_HUC(i915))
		intel_huc_fini(huc);
err_guc:
	intel_guc_fini(guc);
	return ret;
}

void intel_uc_fini(struct drm_i915_private *i915)
{
	struct intel_guc *guc = &i915->guc;

	if (!USES_GUC(i915))
		return;

	GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw));

	if (USES_GUC_SUBMISSION(i915))
		intel_guc_submission_fini(guc);

	if (USES_HUC(i915))
		intel_huc_fini(&i915->huc);

	intel_guc_fini(guc);
}

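/* Sanitize uC firmware state and reset the GuC so that a fresh (re)load can follow. */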
static void __uc_sanitize(struct drm_i915_private *i915)
{
	struct intel_guc *guc = &i915->guc;
	struct intel_huc *huc = &i915->huc;

	GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw));

	intel_huc_sanitize(huc);
	intel_guc_sanitize(guc);

	__intel_uc_reset_hw(i915);
}

void intel_uc_sanitize(struct drm_i915_private *i915)
{
	if (!USES_GUC(i915))
		return;

	__uc_sanitize(i915);
}

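/*
 * Upload the GuC (and, if used, HuC) firmware, authenticate the HuC and
 * bring up communication with the GuC; on Gen9 the GuC load is attempted
 * up to three times (see the W/A notes below).
 */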
int intel_uc_init_hw(struct drm_i915_private *i915)
{
	struct intel_guc *guc = &i915->guc;
	struct intel_huc *huc = &i915->huc;
	int ret, attempts;

	if (!USES_GUC(i915))
		return 0;

	GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw));

	guc_reset_interrupts(guc);

	/* WaEnableuKernelHeaderValidFix:skl */
	/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
	if (IS_GEN(i915, 9))
		attempts = 3;
	else
		attempts = 1;

	while (attempts--) {
		/*
		 * Always reset the GuC just before (re)loading, so
		 * that the state and timing are fairly predictable
		 */
		ret = __intel_uc_reset_hw(i915);
		if (ret)
			goto err_out;

		if (USES_HUC(i915)) {
			ret = intel_huc_fw_upload(huc);
			if (ret)
				goto err_out;
		}

		intel_guc_ads_reset(guc);
		intel_guc_init_params(guc);
		ret = intel_guc_fw_upload(guc);
		if (ret == 0)
			break;

		DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
				 "retry %d more time(s)\n", ret, attempts);
	}

	/* Did we succeed or run out of retries? */
	if (ret)
		goto err_log_capture;

	ret = guc_enable_communication(guc);
	if (ret)
		goto err_log_capture;

	if (USES_HUC(i915)) {
		ret = intel_huc_auth(huc);
		if (ret)
			goto err_communication;
	}

	ret = intel_guc_sample_forcewake(guc);
	if (ret)
		goto err_communication;

	if (USES_GUC_SUBMISSION(i915)) {
		ret = intel_guc_submission_enable(guc);
		if (ret)
			goto err_communication;
	}

	dev_info(i915->drm.dev, "GuC firmware version %u.%u\n",
		 guc->fw.major_ver_found, guc->fw.minor_ver_found);
	dev_info(i915->drm.dev, "GuC submission %s\n",
		 enableddisabled(USES_GUC_SUBMISSION(i915)));
	dev_info(i915->drm.dev, "HuC %s\n",
		 enableddisabled(USES_HUC(i915)));

	return 0;

	/*
	 * We've failed to load the firmware :(
	 */
err_communication:
	guc_disable_communication(guc);
err_log_capture:
	guc_capture_load_err_log(guc);
err_out:
	__uc_sanitize(i915);

	/*
	 * Note that there is no fallback as either user explicitly asked for
	 * the GuC or driver default option was to run with the GuC enabled.
	 */
	if (GEM_WARN_ON(ret == -EIO))
		ret = -EINVAL;

	dev_err(i915->drm.dev, "GuC initialization failed %d\n", ret);
	return ret;
}

void intel_uc_fini_hw(struct drm_i915_private *i915)
{
	struct intel_guc *guc = &i915->guc;

	if (!intel_guc_is_loaded(guc))
		return;

	GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw));

	if (USES_GUC_SUBMISSION(i915))
		intel_guc_submission_disable(guc);

	guc_disable_communication(guc);
	__uc_sanitize(i915);
}

/**
 * intel_uc_reset_prepare - Prepare for reset
 * @i915: device private
 *
 * Preparing for full gpu reset.
 */
void intel_uc_reset_prepare(struct drm_i915_private *i915)
{
	struct intel_guc *guc = &i915->guc;

	if (!intel_guc_is_loaded(guc))
		return;

	guc_stop_communication(guc);
	__uc_sanitize(i915);
}

void intel_uc_runtime_suspend(struct drm_i915_private *i915)
{
	struct intel_guc *guc = &i915->guc;
	int err;

	if (!intel_guc_is_loaded(guc))
		return;

	err = intel_guc_suspend(guc);
	if (err)
		DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err);

	guc_disable_communication(guc);
}

void intel_uc_suspend(struct drm_i915_private *i915)
{
	struct intel_guc *guc = &i915->guc;
	intel_wakeref_t wakeref;

	if (!intel_guc_is_loaded(guc))
		return;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		intel_uc_runtime_suspend(i915);
}

int intel_uc_resume(struct drm_i915_private *i915)
{
	struct intel_guc *guc = &i915->guc;
	int err;

	if (!intel_guc_is_loaded(guc))
		return 0;

	guc_enable_communication(guc);

	err = intel_guc_resume(guc);
	if (err) {
		DRM_DEBUG_DRIVER("Failed to resume GuC, err=%d", err);
		return err;
	}

	return 0;
}