/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_drv.h"
#include "intel_uc.h"
27
#include <linux/firmware.h>
28

/*
 * Reset GuC, providing us with fresh state for both GuC and HuC.
 */
static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 guc_status;

	ret = intel_guc_reset(dev_priv);
	if (ret) {
		DRM_ERROR("GuC reset failed, ret = %d\n", ret);
		return ret;
	}

	guc_status = I915_READ(GUC_STATUS);
	WARN(!(guc_status & GS_MIA_IN_RESET),
	     "GuC status: 0x%x, MIA core expected to be in reset\n",
	     guc_status);

	return ret;
}
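
/*
 * NB: intel_uc_init_hw() below calls __intel_uc_reset_hw() at the top of
 * every firmware load attempt, so each (re)try starts from a known
 * GuC/HuC state.
 */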

void intel_uc_sanitize_options(struct drm_i915_private *dev_priv)
{
	if (!HAS_GUC(dev_priv)) {
		if (i915_modparams.enable_guc_loading > 0 ||
		    i915_modparams.enable_guc_submission > 0)
			DRM_INFO("Ignoring GuC options, no hardware\n");

		i915_modparams.enable_guc_loading = 0;
		i915_modparams.enable_guc_submission = 0;
		return;
	}

	/* A negative value means "use platform default" */
	if (i915_modparams.enable_guc_loading < 0)
		i915_modparams.enable_guc_loading = HAS_GUC_UCODE(dev_priv);

	/* Verify firmware version */
	if (i915_modparams.enable_guc_loading) {
		if (HAS_HUC_UCODE(dev_priv))
			intel_huc_select_fw(&dev_priv->huc);

		if (intel_guc_select_fw(&dev_priv->guc))
			i915_modparams.enable_guc_loading = 0;
	}

	/* Can't enable GuC submission without the GuC being loaded */
	if (!i915_modparams.enable_guc_loading)
		i915_modparams.enable_guc_submission = 0;

	/* A negative value means "use platform default" */
	if (i915_modparams.enable_guc_submission < 0)
		i915_modparams.enable_guc_submission = HAS_GUC_SCHED(dev_priv);
}
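
/*
 * Summary of the modparam values sanitized above, as this file uses them
 * (a reading aid, not an exhaustive description of the modparams):
 *
 *   enable_guc_loading / enable_guc_submission
 *	< 0: use the platform default (HAS_GUC_UCODE / HAS_GUC_SCHED)
 *	  0: disabled
 *	  1: enabled; on failure, fall back and return success
 *	> 1: enabled; on failure, intel_uc_init_hw() returns -EIO
 */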

static void gen8_guc_raise_irq(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	/* Raise the host-to-GuC interrupt to tell the GuC a request is pending */
	I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
}

void intel_uc_init_early(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;

	intel_guc_ct_init_early(&guc->ct);

	mutex_init(&guc->send_mutex);
	/* No transport until guc_enable_communication() runs */
	guc->send = intel_guc_send_nop;
	guc->notify = gen8_guc_raise_irq;
}

void intel_uc_init_fw(struct drm_i915_private *dev_priv)
{
	intel_uc_fw_fetch(dev_priv, &dev_priv->huc.fw);
	intel_uc_fw_fetch(dev_priv, &dev_priv->guc.fw);
}

void intel_uc_fini_fw(struct drm_i915_private *dev_priv)
{
	intel_uc_fw_fini(&dev_priv->guc.fw);
	intel_uc_fw_fini(&dev_priv->huc.fw);
}

static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);
	GEM_BUG_ON(i >= guc->send_regs.count);

	return _MMIO(guc->send_regs.base + 4 * i);
}

static void guc_init_send_regs(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	enum forcewake_domains fw_domains = 0;
	unsigned int i;

	guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
	guc->send_regs.count = SOFT_SCRATCH_COUNT - 1;

	for (i = 0; i < guc->send_regs.count; i++) {
		fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
					guc_send_reg(guc, i),
					FW_REG_READ | FW_REG_WRITE);
	}
	guc->send_regs.fw_domains = fw_domains;
}
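
/*
 * The "send" registers above are the SOFT_SCRATCH MMIO mailbox shared with
 * the GuC: requests are written to SOFT_SCRATCH(0..count-1), the GuC posts
 * its status back in SOFT_SCRATCH(0) and extra response data in
 * SOFT_SCRATCH(15) (see intel_guc_send_mmio() below). Precomputing the
 * union of forcewake domains lets each send take them with a single
 * get/put pair instead of one per register access.
 */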

/**
 * intel_uc_init_mmio - setup uC MMIO access
 *
 * @dev_priv: device private
 *
 * Setup minimal state necessary for MMIO accesses later in the
 * initialization sequence.
 */
void intel_uc_init_mmio(struct drm_i915_private *dev_priv)
{
	guc_init_send_regs(&dev_priv->guc);
}
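
/*
 * Call-order sketch (illustrative; the exact call sites live in the i915
 * core): the uC entry points in this file are expected to run roughly as
 *
 *	intel_uc_init_early()  - early driver init, before MMIO is ready
 *	intel_uc_init_mmio()   - once MMIO access has been set up
 *	intel_uc_init_fw()     - fetch the GuC/HuC firmware images
 *	intel_uc_init_hw()     - reset, load and authenticate the firmware
 *	...
 *	intel_uc_fini_hw()     - teardown, mirroring intel_uc_init_hw()
 *	intel_uc_fini_fw()     - release the firmware images
 */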

static void guc_capture_load_err_log(struct intel_guc *guc)
{
	if (!guc->log.vma || i915_modparams.guc_log_level < 0)
		return;

	/*
	 * Take an extra reference on the log buffer object, so that the
	 * load-error log survives the teardown of the failed init and can
	 * still be inspected afterwards.
	 */
	if (!guc->load_err_log)
		guc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
}

static void guc_free_load_err_log(struct intel_guc *guc)
{
	if (guc->load_err_log)
		i915_gem_object_put(guc->load_err_log);
}

static int guc_enable_communication(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	if (HAS_GUC_CT(dev_priv))
		return intel_guc_enable_ct(guc);

	guc->send = intel_guc_send_mmio;
	return 0;
}

static void guc_disable_communication(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	if (HAS_GUC_CT(dev_priv))
		intel_guc_disable_ct(guc);

	guc->send = intel_guc_send_nop;
}
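
/*
 * Transport selection: the CT buffer based channel is used whenever the
 * platform provides one (HAS_GUC_CT); the SOFT_SCRATCH MMIO mailbox is
 * the fallback. While communication is disabled, guc->send points back
 * at intel_guc_send_nop() so that any stray send trips its WARN.
 */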

/**
 * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
 * @guc: intel_guc structure
 * @rsa_offset: RSA offset of the HuC vma, relative to the GGTT base
 *
 * Triggers a HuC firmware authentication request to the GuC via the
 * intel_guc_send() INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This
 * function is invoked by intel_huc_auth().
 *
 * Return: non-zero code on error
 */
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
	u32 action[] = {
		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_offset
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
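
/*
 * Caller's-eye sketch (illustrative; see intel_huc_auth() for the real
 * code): the HuC firmware object is pinned into the GGTT and the offset
 * of its RSA signature is handed to the GuC, roughly:
 *
 *	offset = guc_ggtt_offset(vma) + huc->fw.rsa_offset;
 *	err = intel_guc_auth_huc(guc, offset);
 */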

int intel_uc_init_hw(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;
	int ret, attempts;

	if (!i915_modparams.enable_guc_loading)
		return 0;

	guc_disable_communication(guc);
	gen9_reset_guc_interrupts(dev_priv);

	/* We need to notify the GuC whenever we change the GGTT */
	i915_ggtt_enable_guc(dev_priv);

	if (i915_modparams.enable_guc_submission) {
		/*
		 * This is stuff we need to have available at fw load time
		 * if we are planning to enable submission later
		 */
		ret = i915_guc_submission_init(dev_priv);
		if (ret)
			goto err_guc;
	}

	/*
	 * Init WOPCM, the on-chip memory partition shared by the GuC and
	 * HuC firmware: program its size and DMA offset, and let the GuC
	 * act as the loading agent for the HuC image.
	 */
	I915_WRITE(GUC_WOPCM_SIZE, intel_guc_wopcm_size(dev_priv));
	I915_WRITE(DMA_GUC_WOPCM_OFFSET,
		   GUC_WOPCM_OFFSET_VALUE | HUC_LOADING_AGENT_GUC);

	/* WaEnableuKernelHeaderValidFix:skl */
	/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
	if (IS_GEN9(dev_priv))
		attempts = 3;
	else
		attempts = 1;

	while (attempts--) {
		/*
		 * Always reset the GuC just before (re)loading, so
		 * that the state and timing are fairly predictable
		 */
		ret = __intel_uc_reset_hw(dev_priv);
		if (ret)
			goto err_submission;

		intel_huc_init_hw(&dev_priv->huc);
		ret = intel_guc_init_hw(&dev_priv->guc);
		if (ret != -EAGAIN)
			break;

		DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and retry %d more time(s)\n",
				 ret, attempts);
	}

	/* Did we succeed or run out of retries? */
	if (ret)
		goto err_log_capture;

	ret = guc_enable_communication(guc);
	if (ret)
		goto err_log_capture;

	intel_huc_auth(&dev_priv->huc);
	if (i915_modparams.enable_guc_submission) {
		if (i915_modparams.guc_log_level >= 0)
			gen9_enable_guc_interrupts(dev_priv);

		ret = i915_guc_submission_enable(dev_priv);
		if (ret)
			goto err_interrupts;
	}

	return 0;

	/*
	 * We've failed to load the firmware :(
	 *
	 * Decide whether to disable GuC submission and fall back to
	 * execlist mode, and whether to hide the error by returning
	 * zero or to return -EIO, which the caller will treat as a
	 * nonfatal error (i.e. it doesn't prevent driver load, but
	 * marks the GPU as wedged until reset).
	 */
err_interrupts:
	guc_disable_communication(guc);
	gen9_disable_guc_interrupts(dev_priv);
err_log_capture:
	guc_capture_load_err_log(guc);
err_submission:
	if (i915_modparams.enable_guc_submission)
		i915_guc_submission_fini(dev_priv);
err_guc:
	i915_ggtt_disable_guc(dev_priv);

	DRM_ERROR("GuC init failed\n");
	if (i915_modparams.enable_guc_loading > 1 ||
	    i915_modparams.enable_guc_submission > 1)
		ret = -EIO;
	else
		ret = 0;

	if (i915_modparams.enable_guc_submission) {
		i915_modparams.enable_guc_submission = 0;
		DRM_NOTE("Falling back from GuC submission to execlist mode\n");
	}

	i915_modparams.enable_guc_loading = 0;
	DRM_NOTE("GuC firmware loading disabled\n");

	return ret;
}
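
/*
 * Call-site sketch (illustrative, not the literal GEM init code): per the
 * comment on the error path above, the caller treats -EIO as non-fatal,
 * wedging the GPU but still letting the driver load:
 *
 *	ret = intel_uc_init_hw(dev_priv);
 *	if (ret)
 *		... mark the GPU wedged, continue driver load ...
 */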

void intel_uc_fini_hw(struct drm_i915_private *dev_priv)
{
	guc_free_load_err_log(&dev_priv->guc);

	if (!i915_modparams.enable_guc_loading)
		return;

	if (i915_modparams.enable_guc_submission)
		i915_guc_submission_disable(dev_priv);

	guc_disable_communication(&dev_priv->guc);

	if (i915_modparams.enable_guc_submission) {
		gen9_disable_guc_interrupts(dev_priv);
		i915_guc_submission_fini(dev_priv);
	}

	i915_ggtt_disable_guc(dev_priv);
}
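
/*
 * Note: the teardown above unwinds intel_uc_init_hw() roughly in reverse:
 * disable submission, drop the communication channel and interrupts, free
 * the submission state, and finally stop forwarding GGTT updates to the
 * GuC.
 */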

int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len)
{
	WARN(1, "Unexpected send: action=%#x\n", *action);
	return -ENODEV;
}

/*
 * This function implements the MMIO based host to GuC interface: the
 * request is written to the SOFT_SCRATCH registers, the GuC is notified
 * via the doorbell interrupt, and the reply is polled for in
 * SOFT_SCRATCH(0), with any extra response data in SOFT_SCRATCH(15).
 */
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 status;
	int i;
	int ret;

	GEM_BUG_ON(!len);
	GEM_BUG_ON(len > guc->send_regs.count);

	/* If CT is available, we expect to use MMIO only during init/fini */
	GEM_BUG_ON(HAS_GUC_CT(dev_priv) &&
		*action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
		*action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);

	mutex_lock(&guc->send_mutex);
	intel_uncore_forcewake_get(dev_priv, guc->send_regs.fw_domains);

	for (i = 0; i < len; i++)
		I915_WRITE(guc_send_reg(guc, i), action[i]);

	POSTING_READ(guc_send_reg(guc, i - 1));

	intel_guc_notify(guc);

	/*
	 * No GuC command should ever take longer than 10ms.
	 * Fast commands should still complete in 10us.
	 */
	ret = __intel_wait_for_register_fw(dev_priv,
					   guc_send_reg(guc, 0),
					   INTEL_GUC_RECV_MASK,
					   INTEL_GUC_RECV_MASK,
					   10, 10, &status);
	if (status != INTEL_GUC_STATUS_SUCCESS) {
		/*
		 * Either the GuC explicitly returned an error (which
		 * we convert to -EIO here) or no response at all was
		 * received within the timeout limit (-ETIMEDOUT)
		 */
		if (ret != -ETIMEDOUT)
			ret = -EIO;

		DRM_WARN("INTEL_GUC_SEND: Action 0x%X failed;"
			 " ret=%d status=0x%08X response=0x%08X\n",
			 action[0], ret, status, I915_READ(SOFT_SCRATCH(15)));
	}

	intel_uncore_forcewake_put(dev_priv, guc->send_regs.fw_domains);
	mutex_unlock(&guc->send_mutex);

	return ret;
}
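
/*
 * Usage sketch (illustrative): callers do not pick a transport directly;
 * intel_guc_send() (in intel_uc.h) dispatches through the guc->send vfunc
 * that guc_enable_communication() installed, e.g.:
 *
 *	u32 action[] = { INTEL_GUC_ACTION_AUTHENTICATE_HUC, rsa_offset };
 *	err = intel_guc_send(guc, action, ARRAY_SIZE(action));
 */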

/*
 * Tell the GuC which forcewake domains to consider awake (inferred from
 * the SAMPLE_FORCEWAKE parameters below: with RC6 off, or with coarse
 * power gating disabled by workaround, neither domain is reported).
 */
int intel_guc_sample_forcewake(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 action[2];

	action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
	/* WaRsDisableCoarsePowerGating:skl,bxt */
	if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
		action[1] = 0;
	else
		/* bits 0 and 1 are for the Render and Media domains respectively */
		action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}