/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_drv.h"
#include "intel_uc.h"
#include "i915_guc_submission.h"
#include <linux/firmware.h>

/* Reset GuC providing us with fresh state for both GuC and HuC. */
static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 guc_status;

	ret = intel_guc_reset(dev_priv);
	if (ret) {
		DRM_ERROR("GuC reset failed, ret = %d\n", ret);
		return ret;
	}

	guc_status = I915_READ(GUC_STATUS);
	WARN(!(guc_status & GS_MIA_IN_RESET),
	     "GuC status: 0x%x, MIA core expected to be in reset\n",
	     guc_status);

	return ret;
}

void intel_uc_sanitize_options(struct drm_i915_private *dev_priv)
{
	if (!HAS_GUC(dev_priv)) {
		if (i915_modparams.enable_guc_loading > 0 ||
		    i915_modparams.enable_guc_submission > 0)
			DRM_INFO("Ignoring GuC options, no hardware\n");

		i915_modparams.enable_guc_loading = 0;
		i915_modparams.enable_guc_submission = 0;
		return;
	}

	/* A negative value means "use platform default" */
	if (i915_modparams.enable_guc_loading < 0)
		i915_modparams.enable_guc_loading = HAS_GUC_UCODE(dev_priv);

	/* Verify firmware version */
	if (i915_modparams.enable_guc_loading) {
		if (HAS_HUC_UCODE(dev_priv))
			intel_huc_select_fw(&dev_priv->huc);

		if (intel_guc_select_fw(&dev_priv->guc))
			i915_modparams.enable_guc_loading = 0;
	}

	/* Can't enable GuC submission without GuC loaded */
	if (!i915_modparams.enable_guc_loading)
		i915_modparams.enable_guc_submission = 0;

	/* A negative value means "use platform default" */
	if (i915_modparams.enable_guc_submission < 0)
		i915_modparams.enable_guc_submission = HAS_GUC_SCHED(dev_priv);
}
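
/*
 * A rough summary of the modparam semantics sanitized above, as this file
 * consumes them (the "> 1" cases are acted upon in intel_uc_init_hw()
 * below):
 *
 *	enable_guc_loading / enable_guc_submission:
 *	  < 0: use the platform default (HAS_GUC_UCODE / HAS_GUC_SCHED)
 *	    0: disabled
 *	    1: enabled; on failure fall back to execlists and return 0
 *	  > 1: enabled; on failure return -EIO (the caller wedges the GPU)
 */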

static void gen8_guc_raise_irq(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	/* Tell the GuC there is new data to process in the send registers */
	I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
}

static void guc_init_early(struct intel_guc *guc)
{
	intel_guc_ct_init_early(&guc->ct);

	mutex_init(&guc->send_mutex);
	guc->send = intel_guc_send_nop;
	guc->notify = gen8_guc_raise_irq;
}

void intel_uc_init_early(struct drm_i915_private *dev_priv)
{
	guc_init_early(&dev_priv->guc);
}

void intel_uc_init_fw(struct drm_i915_private *dev_priv)
{
	intel_uc_fw_fetch(dev_priv, &dev_priv->huc.fw);
	intel_uc_fw_fetch(dev_priv, &dev_priv->guc.fw);
}

void intel_uc_fini_fw(struct drm_i915_private *dev_priv)
{
	/* Release in reverse order of the fetches in intel_uc_init_fw() */
	intel_uc_fw_fini(&dev_priv->guc.fw);
	intel_uc_fw_fini(&dev_priv->huc.fw);
}

static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);
	GEM_BUG_ON(i >= guc->send_regs.count);

	/* Scratch registers are 32 bits wide, hence the 4-byte stride */
	return _MMIO(guc->send_regs.base + 4 * i);
}

static void guc_init_send_regs(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	enum forcewake_domains fw_domains = 0;
	unsigned int i;

	guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
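	/*
	 * Use one register less than the full bank: the last scratch
	 * register, SOFT_SCRATCH(15), appears to be reserved for the GuC
	 * response (see the read in intel_guc_send_mmio() below).
	 */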
	guc->send_regs.count = SOFT_SCRATCH_COUNT - 1;

	for (i = 0; i < guc->send_regs.count; i++) {
		fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
					guc_send_reg(guc, i),
					FW_REG_READ | FW_REG_WRITE);
	}
	guc->send_regs.fw_domains = fw_domains;
}

/**
 * intel_uc_init_mmio - setup uC MMIO access
 *
 * @dev_priv: device private
 *
 * Set up minimal state necessary for MMIO accesses later in the
 * initialization sequence.
 */
void intel_uc_init_mmio(struct drm_i915_private *dev_priv)
{
	guc_init_send_regs(&dev_priv->guc);
}

static void guc_capture_load_err_log(struct intel_guc *guc)
{
	if (!guc->log.vma || i915_modparams.guc_log_level < 0)
		return;

	if (!guc->load_err_log)
		guc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
}

static void guc_free_load_err_log(struct intel_guc *guc)
{
	if (guc->load_err_log)
		i915_gem_object_put(guc->load_err_log);
}
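
/*
 * Host-to-GuC communication, in short: where the hardware has a CT
 * (command transport) buffer it is used, otherwise actions go out as
 * plain MMIO writes to the scratch registers (intel_guc_send_mmio()
 * below). guc->send starts out as intel_guc_send_nop() and is switched
 * to a real transport by guc_enable_communication().
 */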

static int guc_enable_communication(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	if (HAS_GUC_CT(dev_priv))
		return intel_guc_enable_ct(guc);

	guc->send = intel_guc_send_mmio;
	return 0;
}

static void guc_disable_communication(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	if (HAS_GUC_CT(dev_priv))
		intel_guc_disable_ct(guc);

	guc->send = intel_guc_send_nop;
}

/**
 * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
 * @guc: intel_guc structure
 * @rsa_offset: RSA offset w.r.t. the GGTT base of the HuC vma
 *
 * Triggers a HuC firmware authentication request to the GuC via the
 * INTEL_GUC_ACTION_AUTHENTICATE_HUC action of intel_guc_send(). This
 * function is invoked by intel_huc_auth().
 *
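 * A rough caller sketch, with a hypothetical vma pinned in the GGTT for
 * the HuC firmware (intel_huc_auth() does something along these lines):
 *
 *	intel_guc_auth_huc(guc, guc_ggtt_offset(vma) + huc->fw.rsa_offset);
 *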
 * Return:	non-zero code on error
 */
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
	u32 action[] = {
		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_offset
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

int intel_uc_init_hw(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;
	int ret, attempts;

	if (!i915_modparams.enable_guc_loading)
		return 0;

	guc_disable_communication(guc);
	gen9_reset_guc_interrupts(dev_priv);

	/* We need to notify the GuC whenever we change the GGTT */
	i915_ggtt_enable_guc(dev_priv);

	if (i915_modparams.enable_guc_submission) {
		/*
		 * This is stuff we need to have available at fw load time
		 * if we are planning to enable submission later
		 */
		ret = i915_guc_submission_init(dev_priv);
		if (ret)
			goto err_guc;
	}

	/* init WOPCM (Write Once Protected Content Memory) */
	I915_WRITE(GUC_WOPCM_SIZE, intel_guc_wopcm_size(dev_priv));
	I915_WRITE(DMA_GUC_WOPCM_OFFSET,
		   GUC_WOPCM_OFFSET_VALUE | HUC_LOADING_AGENT_GUC);

	/* WaEnableuKernelHeaderValidFix:skl */
	/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
	if (IS_GEN9(dev_priv))
		attempts = 3;
	else
		attempts = 1;

	while (attempts--) {
		/*
		 * Always reset the GuC just before (re)loading, so
		 * that the state and timing are fairly predictable
		 */
		ret = __intel_uc_reset_hw(dev_priv);
		if (ret)
			goto err_submission;

		intel_huc_init_hw(&dev_priv->huc);
		ret = intel_guc_init_hw(&dev_priv->guc);
		/* Done on success or on any failure other than -EAGAIN */
		if (ret != -EAGAIN)
			break;

		DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and retry %d more time(s)\n",
				 ret, attempts);
	}

	/* Did we succeed or run out of retries? */
	if (ret)
		goto err_log_capture;

	ret = guc_enable_communication(guc);
	if (ret)
		goto err_log_capture;

	intel_huc_auth(&dev_priv->huc);
	if (i915_modparams.enable_guc_submission) {
		if (i915_modparams.guc_log_level >= 0)
			gen9_enable_guc_interrupts(dev_priv);

		ret = i915_guc_submission_enable(dev_priv);
		if (ret)
			goto err_interrupts;
	}

	return 0;

	/*
	 * We've failed to load the firmware :(
	 *
	 * Decide whether to disable GuC submission and fall back to
	 * execlist mode, and whether to hide the error by returning
	 * zero or to return -EIO, which the caller will treat as a
	 * nonfatal error (i.e. it doesn't prevent driver load, but
	 * marks the GPU as wedged until reset).
	 */
err_interrupts:
	guc_disable_communication(guc);
	gen9_disable_guc_interrupts(dev_priv);
err_log_capture:
	guc_capture_load_err_log(guc);
err_submission:
	if (i915_modparams.enable_guc_submission)
		i915_guc_submission_fini(dev_priv);
err_guc:
	i915_ggtt_disable_guc(dev_priv);

	DRM_ERROR("GuC init failed\n");
	if (i915_modparams.enable_guc_loading > 1 ||
	    i915_modparams.enable_guc_submission > 1)
		ret = -EIO;
	else
		ret = 0;

	if (i915_modparams.enable_guc_submission) {
		i915_modparams.enable_guc_submission = 0;
		DRM_NOTE("Falling back from GuC submission to execlist mode\n");
	}

	i915_modparams.enable_guc_loading = 0;
	DRM_NOTE("GuC firmware loading disabled\n");

	return ret;
}

void intel_uc_fini_hw(struct drm_i915_private *dev_priv)
{
	guc_free_load_err_log(&dev_priv->guc);

	if (!i915_modparams.enable_guc_loading)
		return;

	if (i915_modparams.enable_guc_submission)
		i915_guc_submission_disable(dev_priv);

	guc_disable_communication(&dev_priv->guc);

	if (i915_modparams.enable_guc_submission) {
		gen9_disable_guc_interrupts(dev_priv);
		i915_guc_submission_fini(dev_priv);
	}

	i915_ggtt_disable_guc(dev_priv);
}

int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len)
{
	WARN(1, "Unexpected send: action=%#x\n", *action);
	return -ENODEV;
}

/*
 * This function implements the MMIO-based host-to-GuC interface.
 */
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 status;
	int i;
	int ret;

	GEM_BUG_ON(!len);
	GEM_BUG_ON(len > guc->send_regs.count);

	/* If CT is available, we expect to use MMIO only during init/fini */
	GEM_BUG_ON(HAS_GUC_CT(dev_priv) &&
		*action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
		*action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);

	mutex_lock(&guc->send_mutex);
	intel_uncore_forcewake_get(dev_priv, guc->send_regs.fw_domains);

	/* Stage the whole action payload in the send (scratch) registers */
	for (i = 0; i < len; i++)
		I915_WRITE(guc_send_reg(guc, i), action[i]);

	POSTING_READ(guc_send_reg(guc, i - 1));

	intel_guc_notify(guc);

	/*
	 * No GuC command should ever take longer than 10ms.
	 * Fast commands should still complete in 10us.
	 */
	ret = __intel_wait_for_register_fw(dev_priv,
					   guc_send_reg(guc, 0),
					   INTEL_GUC_RECV_MASK,
					   INTEL_GUC_RECV_MASK,
					   10, 10, &status);
	if (status != INTEL_GUC_STATUS_SUCCESS) {
		/*
		 * Either the GuC explicitly returned an error (which
		 * we convert to -EIO here) or no response at all was
		 * received within the timeout limit (-ETIMEDOUT)
		 */
		if (ret != -ETIMEDOUT)
			ret = -EIO;

		DRM_WARN("INTEL_GUC_SEND: Action 0x%X failed;"
			 " ret=%d status=0x%08X response=0x%08X\n",
			 action[0], ret, status, I915_READ(SOFT_SCRATCH(15)));
	}

	intel_uncore_forcewake_put(dev_priv, guc->send_regs.fw_domains);
	mutex_unlock(&guc->send_mutex);

	return ret;
}
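
/*
 * Tell the GuC which forcewake domains to take into account: none when
 * RC6 is disabled or WaRsDisableCoarsePowerGating applies, otherwise
 * render and media. This also serves as a minimal example of the send
 * path above: a small u32 payload dispatched via intel_guc_send().
 */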

int intel_guc_sample_forcewake(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 action[2];

	action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
	/* WaRsDisableCoarsePowerGating:skl,bxt */
	if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
		action[1] = 0;
	else
		/* bits 0 and 1 select the render and media domains respectively */
		action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}