/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "gt/intel_reset.h"
#include "intel_uc.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"

static void guc_free_load_err_log(struct intel_guc *guc);

/*
 * Reset GuC providing us with fresh state for both GuC and HuC.
 */
static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 guc_status;

	ret = intel_reset_guc(dev_priv);
	if (ret) {
		DRM_ERROR("Failed to reset GuC, ret = %d\n", ret);
		return ret;
	}

	guc_status = I915_READ(GUC_STATUS);
	WARN(!(guc_status & GS_MIA_IN_RESET),
	     "GuC status: 0x%x, MIA core expected to be in reset\n",
	     guc_status);

	return ret;
}

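/*
 * Platform default for the "enable_guc" modparam: ask for HuC loading via
 * the GuC whenever both GuC and HuC firmwares are known for the platform.
 */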
static int __get_platform_enable_guc(struct drm_i915_private *i915)
{
	struct intel_uc_fw *guc_fw = &i915->guc.fw;
	struct intel_uc_fw *huc_fw = &i915->huc.fw;
	int enable_guc = 0;

	/* Default is to use HuC if we know GuC and HuC firmwares */
	if (intel_uc_fw_is_selected(guc_fw) && intel_uc_fw_is_selected(huc_fw))
		enable_guc |= ENABLE_GUC_LOAD_HUC;

	/* Any platform specific fine-tuning can be done here */

	return enable_guc;
}

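/*
 * Platform/config default for the "guc_log_level" modparam: disabled unless
 * the GuC is in use, maximum verbosity on debug builds, non-verbose otherwise.
 */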
static int __get_default_guc_log_level(struct drm_i915_private *i915)
{
	int guc_log_level;

	if (!HAS_GUC(i915) || !intel_uc_is_using_guc(i915))
		guc_log_level = GUC_LOG_LEVEL_DISABLED;
	else if (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
		 IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		guc_log_level = GUC_LOG_LEVEL_MAX;
	else
		guc_log_level = GUC_LOG_LEVEL_NON_VERBOSE;

	/* Any platform specific fine-tuning can be done here */

	return guc_log_level;
}

/**
 * sanitize_options_early - sanitize uC related modparam options
 * @i915: device private
 *
 * In case of the "enable_guc" option this function will attempt to modify
 * it only if it was initially set to "auto(-1)". The default value for this
 * modparam varies between platforms and is hardcoded in driver code.
 * Any other modparam value is only checked against the availability of the
 * related hardware or firmware definitions.
 *
 * In case of the "guc_log_level" option this function will attempt to modify
 * it only if it was initially set to "auto(-1)" or if the initial value was
 * "enable(1..4)" on platforms without the GuC. The default value for this
 * modparam varies between platforms and is usually set to "disable(0)",
 * unless the GuC is enabled on the given platform and the driver is compiled
 * with a debug config, in which case it will default to "enable(1..4)".
 */
static void sanitize_options_early(struct drm_i915_private *i915)
{
	struct intel_uc_fw *guc_fw = &i915->guc.fw;
	struct intel_uc_fw *huc_fw = &i915->huc.fw;

	/* A negative value means "use platform default" */
	if (i915_modparams.enable_guc < 0)
		i915_modparams.enable_guc = __get_platform_enable_guc(i915);

	DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n",
			 i915_modparams.enable_guc,
			 yesno(intel_uc_is_using_guc_submission(i915)),
			 yesno(intel_uc_is_using_huc(i915)));

	/* Verify GuC firmware availability */
	if (intel_uc_is_using_guc(i915) && !intel_uc_fw_is_selected(guc_fw)) {
		DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
			 "enable_guc", i915_modparams.enable_guc,
			 !HAS_GUC(i915) ? "no GuC hardware" :
					  "no GuC firmware");
	}

	/* Verify HuC firmware availability */
	if (intel_uc_is_using_huc(i915) && !intel_uc_fw_is_selected(huc_fw)) {
		DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
			 "enable_guc", i915_modparams.enable_guc,
			 !HAS_HUC(i915) ? "no HuC hardware" :
					  "no HuC firmware");
	}

	/* XXX: GuC submission is unavailable for now */
	if (intel_uc_is_using_guc_submission(i915)) {
		DRM_INFO("Incompatible option detected: %s=%d, %s!\n",
			 "enable_guc", i915_modparams.enable_guc,
			 "GuC submission not supported");
		DRM_INFO("Switching to non-GuC submission mode!\n");
		i915_modparams.enable_guc &= ~ENABLE_GUC_SUBMISSION;
	}

	/* A negative value means "use platform/config default" */
	if (i915_modparams.guc_log_level < 0)
		i915_modparams.guc_log_level =
			__get_default_guc_log_level(i915);

	if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc(i915)) {
		DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
			 "guc_log_level", i915_modparams.guc_log_level,
			 !HAS_GUC(i915) ? "no GuC hardware" :
					  "GuC not enabled");
		i915_modparams.guc_log_level = 0;
	}

	if (i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX) {
		DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
			 "guc_log_level", i915_modparams.guc_log_level,
			 "verbosity too high");
		i915_modparams.guc_log_level = GUC_LOG_LEVEL_MAX;
	}

	DRM_DEBUG_DRIVER("guc_log_level=%d (enabled:%s, verbose:%s, verbosity:%d)\n",
			 i915_modparams.guc_log_level,
			 yesno(i915_modparams.guc_log_level),
			 yesno(GUC_LOG_LEVEL_IS_VERBOSE(i915_modparams.guc_log_level)),
			 GUC_LOG_LEVEL_TO_VERBOSITY(i915_modparams.guc_log_level));

	/* Make sure that sanitization was done */
	GEM_BUG_ON(i915_modparams.enable_guc < 0);
	GEM_BUG_ON(i915_modparams.guc_log_level < 0);
}

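/* Set up early GuC/HuC state and sanitize the uC related modparams. */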
void intel_uc_init_early(struct drm_i915_private *i915)
{
	struct intel_guc *guc = &i915->guc;
	struct intel_huc *huc = &i915->huc;

	intel_guc_init_early(guc);
	intel_huc_init_early(huc);

	sanitize_options_early(i915);
}

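/*
 * Counterpart of intel_uc_init_early(); frees the captured GuC load error
 * log, if any.
 */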
void intel_uc_cleanup_early(struct drm_i915_private *i915)
{
	struct intel_guc *guc = &i915->guc;

	guc_free_load_err_log(guc);
}

/**
 * intel_uc_init_mmio - setup uC MMIO access
 * @i915: device private
 *
 * Setup minimal state necessary for MMIO accesses later in the
 * initialization sequence.
 */
void intel_uc_init_mmio(struct drm_i915_private *i915)
{
	intel_guc_init_send_regs(&i915->guc);
}

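/*
 * On firmware load failure, take an extra reference on the GuC log buffer
 * object so the error log can still be looked at later.
 */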
static void guc_capture_load_err_log(struct intel_guc *guc)
{
	if (!guc->log.vma || !intel_guc_log_get_level(&guc->log))
		return;

	if (!guc->load_err_log)
		guc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
}

static void guc_free_load_err_log(struct intel_guc *guc)
{
	if (guc->load_err_log)
		i915_gem_object_put(guc->load_err_log);
}

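/*
 * Enable GuC interrupts and open the host/GuC communication channel: CT
 * buffers when available, MMIO based send/receive otherwise.
 */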
static int guc_enable_communication(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_i915(guc);

	gen9_enable_guc_interrupts(i915);

	if (HAS_GUC_CT(i915))
		return intel_guc_ct_enable(&guc->ct);

	guc->send = intel_guc_send_mmio;
	guc->handler = intel_guc_to_host_event_handler_mmio;
	return 0;
}

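/*
 * Quiesce host/GuC traffic (CT stopped, send/handler turned into nops)
 * without touching interrupt state; used on the reset path.
 */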
static void guc_stop_communication(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_i915(guc);

	if (HAS_GUC_CT(i915))
		intel_guc_ct_stop(&guc->ct);

	guc->send = intel_guc_send_nop;
	guc->handler = intel_guc_to_host_event_handler_nop;
}

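/*
 * Fully tear down the communication channel and disable GuC interrupts;
 * further send attempts become nops.
 */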
static void guc_disable_communication(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_i915(guc);

	if (HAS_GUC_CT(i915))
		intel_guc_ct_disable(&guc->ct);

	gen9_disable_guc_interrupts(i915);

	guc->send = intel_guc_send_nop;
	guc->handler = intel_guc_to_host_event_handler_nop;
}

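/* Set up misc GuC/HuC state; a no-op unless GuC usage has been requested. */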
int intel_uc_init_misc(struct drm_i915_private *i915)
{
	struct intel_guc *guc = &i915->guc;
	struct intel_huc *huc = &i915->huc;
	int ret;

	if (!USES_GUC(i915))
		return 0;

	ret = intel_guc_init_misc(guc);
	if (ret)
		return ret;

	if (USES_HUC(i915)) {
		ret = intel_huc_init_misc(huc);
		if (ret)
			goto err_guc;
	}

	return 0;

err_guc:
	intel_guc_fini_misc(guc);
	return ret;
}

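/* Undo intel_uc_init_misc(), releasing HuC state before GuC state. */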
void intel_uc_fini_misc(struct drm_i915_private *i915)
{
	struct intel_guc *guc = &i915->guc;
	struct intel_huc *huc = &i915->huc;

	if (!USES_GUC(i915))
		return;

	if (USES_HUC(i915))
		intel_huc_fini_misc(huc);

	intel_guc_fini_misc(guc);
}

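/*
 * Allocate the GuC (and, when used, HuC and GuC submission) objects that
 * must exist before firmware can be loaded.
 */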
int intel_uc_init(struct drm_i915_private *i915)
{
	struct intel_guc *guc = &i915->guc;
	struct intel_huc *huc = &i915->huc;
	int ret;

	if (!USES_GUC(i915))
		return 0;

	if (!HAS_GUC(i915))
		return -ENODEV;

	/* XXX: GuC submission is unavailable for now */
	GEM_BUG_ON(USES_GUC_SUBMISSION(i915));

	ret = intel_guc_init(guc);
	if (ret)
		return ret;

	if (USES_HUC(i915)) {
		ret = intel_huc_init(huc);
		if (ret)
			goto err_guc;
	}

	if (USES_GUC_SUBMISSION(i915)) {
		/*
		 * This is stuff we need to have available at fw load time
		 * if we are planning to enable submission later
		 */
		ret = intel_guc_submission_init(guc);
		if (ret)
			goto err_huc;
	}

	return 0;

err_huc:
	if (USES_HUC(i915))
		intel_huc_fini(huc);
err_guc:
	intel_guc_fini(guc);
	return ret;
}

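/* Release everything allocated in intel_uc_init(), in reverse order. */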
void intel_uc_fini(struct drm_i915_private *i915)
{
	struct intel_guc *guc = &i915->guc;

	if (!USES_GUC(i915))
		return;

	GEM_BUG_ON(!HAS_GUC(i915));

	if (USES_GUC_SUBMISSION(i915))
		intel_guc_submission_fini(guc);

	if (USES_HUC(i915))
		intel_huc_fini(&i915->huc);

	intel_guc_fini(guc);
}

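/*
 * Reset the GuC so that both GuC and HuC start the next (re)load from a
 * known, clean state.
 */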
static void __uc_sanitize(struct drm_i915_private *i915)
{
	struct intel_guc *guc = &i915->guc;
	struct intel_huc *huc = &i915->huc;

	GEM_BUG_ON(!HAS_GUC(i915));

	intel_huc_sanitize(huc);
	intel_guc_sanitize(guc);

	__intel_uc_reset_hw(i915);
}

void intel_uc_sanitize(struct drm_i915_private *i915)
{
	if (!USES_GUC(i915))
		return;

	__uc_sanitize(i915);
}

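/*
 * Load the uC firmwares: reset the GuC, upload the HuC and then the GuC
 * firmware (with retries on Gen9), bring up communication, authenticate the
 * HuC and, when enabled, turn on GuC submission.
 */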
int intel_uc_init_hw(struct drm_i915_private *i915)
{
	struct intel_guc *guc = &i915->guc;
	struct intel_huc *huc = &i915->huc;
	int ret, attempts;

	if (!USES_GUC(i915))
		return 0;

	GEM_BUG_ON(!HAS_GUC(i915));

	gen9_reset_guc_interrupts(i915);

	/* WaEnableuKernelHeaderValidFix:skl */
	/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
	if (IS_GEN(i915, 9))
		attempts = 3;
	else
		attempts = 1;

	while (attempts--) {
		/*
		 * Always reset the GuC just before (re)loading, so
		 * that the state and timing are fairly predictable
		 */
		ret = __intel_uc_reset_hw(i915);
		if (ret)
			goto err_out;

		if (USES_HUC(i915)) {
			ret = intel_huc_fw_upload(huc);
			if (ret)
				goto err_out;
		}

		intel_guc_ads_reset(guc);
		intel_guc_init_params(guc);
		ret = intel_guc_fw_upload(guc);
		if (ret == 0)
			break;

		DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
				 "retry %d more time(s)\n", ret, attempts);
	}

	/* Did we succeed or run out of retries? */
	if (ret)
		goto err_log_capture;

	ret = guc_enable_communication(guc);
	if (ret)
		goto err_log_capture;

	if (USES_HUC(i915)) {
		ret = intel_huc_auth(huc);
		if (ret)
			goto err_communication;
	}

	if (USES_GUC_SUBMISSION(i915)) {
		ret = intel_guc_submission_enable(guc);
		if (ret)
			goto err_communication;
	} else if (INTEL_GEN(i915) < 11) {
		ret = intel_guc_sample_forcewake(guc);
		if (ret)
			goto err_communication;
	}

	dev_info(i915->drm.dev, "GuC firmware version %u.%u\n",
		 guc->fw.major_ver_found, guc->fw.minor_ver_found);
	dev_info(i915->drm.dev, "GuC submission %s\n",
		 enableddisabled(USES_GUC_SUBMISSION(i915)));
	dev_info(i915->drm.dev, "HuC %s\n",
		 enableddisabled(USES_HUC(i915)));

	return 0;

	/*
	 * We've failed to load the firmware :(
	 */
err_communication:
	guc_disable_communication(guc);
err_log_capture:
	guc_capture_load_err_log(guc);
err_out:
	__uc_sanitize(i915);

	/*
	 * Note that there is no fallback as either the user explicitly asked
	 * for the GuC or the driver default option was to run with the GuC
	 * enabled.
	 */
	if (GEM_WARN_ON(ret == -EIO))
		ret = -EINVAL;

	dev_err(i915->drm.dev, "GuC initialization failed %d\n", ret);
	return ret;
}

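/* Tear down the live GuC state brought up in intel_uc_init_hw(). */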
void intel_uc_fini_hw(struct drm_i915_private *i915)
{
	struct intel_guc *guc = &i915->guc;

	if (!intel_guc_is_loaded(guc))
		return;

	GEM_BUG_ON(!HAS_GUC(i915));

	if (USES_GUC_SUBMISSION(i915))
		intel_guc_submission_disable(guc);

	guc_disable_communication(guc);
	__uc_sanitize(i915);
}

/**
 * intel_uc_reset_prepare - Prepare for reset
 * @i915: device private
 *
 * Preparing for full gpu reset.
 */
void intel_uc_reset_prepare(struct drm_i915_private *i915)
{
	struct intel_guc *guc = &i915->guc;

	if (!intel_guc_is_loaded(guc))
		return;

	guc_stop_communication(guc);
	__uc_sanitize(i915);
}

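/*
 * Runtime suspend hook: ask the GuC to save its state and close the
 * communication channel; a no-op when no GuC firmware is loaded.
 */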
void intel_uc_runtime_suspend(struct drm_i915_private *i915)
{
	struct intel_guc *guc = &i915->guc;
	int err;

	if (!intel_guc_is_loaded(guc))
		return;

	err = intel_guc_suspend(guc);
	if (err)
		DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err);

	guc_disable_communication(guc);
}

void intel_uc_suspend(struct drm_i915_private *i915)
{
	struct intel_guc *guc = &i915->guc;
	intel_wakeref_t wakeref;

	if (!intel_guc_is_loaded(guc))
		return;

	with_intel_runtime_pm(i915, wakeref)
		intel_uc_runtime_suspend(i915);
}

int intel_uc_resume(struct drm_i915_private *i915)
{
	struct intel_guc *guc = &i915->guc;
	int err;

	if (!intel_guc_is_loaded(guc))
		return 0;

	guc_enable_communication(guc);

	err = intel_guc_resume(guc);
	if (err) {
		DRM_DEBUG_DRIVER("Failed to resume GuC, err=%d", err);
		return err;
	}

	return 0;
}