// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2019 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/firmware.h>
#include <drm/drm_print.h>

#include "intel_uc_fw.h"
#include "intel_uc_fw_abi.h"
#include "i915_drv.h"

static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
{
	GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
	if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
		return container_of(uc_fw, struct intel_gt, uc.guc.fw);

	GEM_BUG_ON(uc_fw->type != INTEL_UC_FW_TYPE_HUC);
	return container_of(uc_fw, struct intel_gt, uc.huc.fw);
}

#ifdef CONFIG_DRM_I915_DEBUG_GUC
void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
			       enum intel_uc_fw_status status)
{
	uc_fw->__status = status;
	DRM_DEV_DEBUG_DRIVER(__uc_fw_to_gt(uc_fw)->i915->drm.dev,
			     "%s firmware -> %s\n",
			     intel_uc_fw_type_repr(uc_fw->type),
			     status == INTEL_UC_FIRMWARE_SELECTED ?
			     uc_fw->path : intel_uc_fw_status_repr(status));
}
#endif

/*
 * List of required GuC and HuC binaries per-platform.
 * Must be ordered based on platform + revid, from newer to older.
 *
 * TGL 35.2 is interface-compatible with 33.0 for previous Gens. The deltas
 * between 33.0 and 35.2 are only related to new additions to support new Gen12
 * features.
 */
#define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \
	fw_def(TIGERLAKE,   0, guc_def(tgl, 35, 2, 0), huc_def(tgl,  7, 0, 3)) \
	fw_def(ELKHARTLAKE, 0, guc_def(ehl, 33, 0, 4), huc_def(ehl,  9, 0, 0)) \
	fw_def(ICELAKE,     0, guc_def(icl, 33, 0, 0), huc_def(icl,  9, 0, 0)) \
	fw_def(COFFEELAKE,  5, guc_def(cml, 33, 0, 0), huc_def(cml,  4, 0, 0)) \
	fw_def(COFFEELAKE,  0, guc_def(kbl, 33, 0, 0), huc_def(kbl,  4, 0, 0)) \
	fw_def(GEMINILAKE,  0, guc_def(glk, 33, 0, 0), huc_def(glk,  4, 0, 0)) \
	fw_def(KABYLAKE,    0, guc_def(kbl, 33, 0, 0), huc_def(kbl,  4, 0, 0)) \
	fw_def(BROXTON,     0, guc_def(bxt, 33, 0, 0), huc_def(bxt,  2, 0, 0)) \
	fw_def(SKYLAKE,     0, guc_def(skl, 33, 0, 0), huc_def(skl,  2, 0, 0))

#define __MAKE_UC_FW_PATH(prefix_, name_, major_, minor_, patch_) \
	"i915/" \
	__stringify(prefix_) name_ \
	__stringify(major_) "." \
	__stringify(minor_) "." \
	__stringify(patch_) ".bin"

#define MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_) \
	__MAKE_UC_FW_PATH(prefix_, "_guc_", major_, minor_, patch_)

#define MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_) \
	__MAKE_UC_FW_PATH(prefix_, "_huc_", major_, minor_, bld_num_)
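
/*
 * As an example of how the macros above expand: MAKE_GUC_FW_PATH(tgl, 35, 2, 0)
 * yields "i915/tgl_guc_35.2.0.bin" and MAKE_HUC_FW_PATH(tgl, 7, 0, 3) yields
 * "i915/tgl_huc_7.0.3.bin", matching the TGL entry in the list above.
 */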

/* All blobs need to be declared via MODULE_FIRMWARE() */
#define INTEL_UC_MODULE_FW(platform_, revid_, guc_, huc_) \
	MODULE_FIRMWARE(guc_); \
	MODULE_FIRMWARE(huc_);

INTEL_UC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH, MAKE_HUC_FW_PATH)

/* The below structs and macros are used to iterate across the list of blobs */
struct __packed uc_fw_blob {
	u8 major;
	u8 minor;
	const char *path;
};

#define UC_FW_BLOB(major_, minor_, path_) \
	{ .major = major_, .minor = minor_, .path = path_ }

#define GUC_FW_BLOB(prefix_, major_, minor_, patch_) \
	UC_FW_BLOB(major_, minor_, \
		   MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_))

#define HUC_FW_BLOB(prefix_, major_, minor_, bld_num_) \
	UC_FW_BLOB(major_, minor_, \
		   MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_))

struct __packed uc_fw_platform_requirement {
	enum intel_platform p;
	u8 rev; /* first platform rev using this FW */
	const struct uc_fw_blob blobs[INTEL_UC_FW_NUM_TYPES];
};

#define MAKE_FW_LIST(platform_, revid_, guc_, huc_) \
{ \
	.p = INTEL_##platform_, \
	.rev = revid_, \
	.blobs[INTEL_UC_FW_TYPE_GUC] = guc_, \
	.blobs[INTEL_UC_FW_TYPE_HUC] = huc_, \
},

static void
__uc_fw_auto_select(struct intel_uc_fw *uc_fw, enum intel_platform p, u8 rev)
{
	static const struct uc_fw_platform_requirement fw_blobs[] = {
		INTEL_UC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB, HUC_FW_BLOB)
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_blobs) && p <= fw_blobs[i].p; i++) {
		if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) {
			const struct uc_fw_blob *blob =
					&fw_blobs[i].blobs[uc_fw->type];
			uc_fw->path = blob->path;
			uc_fw->major_ver_wanted = blob->major;
			uc_fw->minor_ver_wanted = blob->minor;
			break;
		}
	}

	/* make sure the list is ordered as expected */
	if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) {
		for (i = 1; i < ARRAY_SIZE(fw_blobs); i++) {
			if (fw_blobs[i].p < fw_blobs[i - 1].p)
				continue;

			if (fw_blobs[i].p == fw_blobs[i - 1].p &&
			    fw_blobs[i].rev < fw_blobs[i - 1].rev)
				continue;

			pr_err("invalid FW blob order: %s r%u comes before %s r%u\n",
			       intel_platform_name(fw_blobs[i - 1].p),
			       fw_blobs[i - 1].rev,
			       intel_platform_name(fw_blobs[i].p),
			       fw_blobs[i].rev);

			uc_fw->path = NULL;
		}
	}

	/* We don't want to enable GuC/HuC on pre-Gen11 by default */
	if (i915_modparams.enable_guc == -1 && p < INTEL_ICELAKE)
		uc_fw->path = NULL;
}

static const char *__override_guc_firmware_path(void)
{
	if (i915_modparams.enable_guc & (ENABLE_GUC_SUBMISSION |
					 ENABLE_GUC_LOAD_HUC))
		return i915_modparams.guc_firmware_path;
	return "";
}

static const char *__override_huc_firmware_path(void)
{
	if (i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC)
		return i915_modparams.huc_firmware_path;
	return "";
}

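/*
 * Note the convention used by the two helpers above: returning NULL (modparam
 * not set) leaves the auto-selected blob untouched, while returning an empty
 * string ends up marking the firmware as disabled.
 */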
static void __uc_fw_user_override(struct intel_uc_fw *uc_fw)
{
	const char *path = NULL;

	switch (uc_fw->type) {
	case INTEL_UC_FW_TYPE_GUC:
		path = __override_guc_firmware_path();
		break;
	case INTEL_UC_FW_TYPE_HUC:
		path = __override_huc_firmware_path();
		break;
	}

	if (unlikely(path)) {
		uc_fw->path = path;
		uc_fw->user_overridden = true;
	}
}

/**
 * intel_uc_fw_init_early - initialize the uC object and select the firmware
 * @uc_fw: uC firmware
 * @type: type of uC
 * @supported: is uC support possible
 * @platform: platform identifier
 * @rev: hardware revision
 *
 * Initialize the state of our uC object and relevant tracking and select the
 * firmware to fetch and load.
 */
void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
			    enum intel_uc_fw_type type, bool supported,
			    enum intel_platform platform, u8 rev)
{
	/*
	 * we use FIRMWARE_UNINITIALIZED to detect checks against uc_fw->status
	 * before we've looked at the HW caps to see if we have uc support
	 */
	BUILD_BUG_ON(INTEL_UC_FIRMWARE_UNINITIALIZED);
	GEM_BUG_ON(uc_fw->status);
	GEM_BUG_ON(uc_fw->path);

	uc_fw->type = type;

	if (supported) {
		__uc_fw_auto_select(uc_fw, platform, rev);
		__uc_fw_user_override(uc_fw);
	}

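	/*
	 * A NULL path means there is no uC support (or no blob was selected),
	 * an empty path means the uC is disabled (e.g. via enable_guc), and
	 * anything else is the blob we will try to fetch.
	 */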
	intel_uc_fw_change_status(uc_fw, uc_fw->path ? *uc_fw->path ?
				  INTEL_UC_FIRMWARE_SELECTED :
				  INTEL_UC_FIRMWARE_DISABLED :
				  INTEL_UC_FIRMWARE_NOT_SUPPORTED);
}

static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e)
{
	struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
	bool user = e == -EINVAL;

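	/*
	 * Each i915_inject_probe_error() call below is a separate fault
	 * injection point, so the configured failure count selects which of
	 * these tweaks gets applied on a given probe attempt; without error
	 * injection compiled in they all evaluate to false.
	 */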
	if (i915_inject_probe_error(i915, e)) {
		/* non-existing blob */
		uc_fw->path = "<invalid>";
		uc_fw->user_overridden = user;
	} else if (i915_inject_probe_error(i915, e)) {
		/* require next major version */
		uc_fw->major_ver_wanted += 1;
		uc_fw->minor_ver_wanted = 0;
		uc_fw->user_overridden = user;
	} else if (i915_inject_probe_error(i915, e)) {
		/* require next minor version */
		uc_fw->minor_ver_wanted += 1;
		uc_fw->user_overridden = user;
	} else if (uc_fw->major_ver_wanted &&
		   i915_inject_probe_error(i915, e)) {
		/* require prev major version */
		uc_fw->major_ver_wanted -= 1;
		uc_fw->minor_ver_wanted = 0;
		uc_fw->user_overridden = user;
	} else if (uc_fw->minor_ver_wanted &&
		   i915_inject_probe_error(i915, e)) {
		/* require prev minor version - hey, this should work! */
		uc_fw->minor_ver_wanted -= 1;
		uc_fw->user_overridden = user;
	} else if (user && i915_inject_probe_error(i915, e)) {
		/* officially unsupported platform */
		uc_fw->major_ver_wanted = 0;
		uc_fw->minor_ver_wanted = 0;
		uc_fw->user_overridden = true;
	}
}

/**
 * intel_uc_fw_fetch - fetch uC firmware
 * @uc_fw: uC firmware
 *
 * Fetch uC firmware into GEM obj.
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
{
	struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
	struct device *dev = i915->drm.dev;
	struct drm_i915_gem_object *obj;
	const struct firmware *fw = NULL;
	struct uc_css_header *css;
	size_t size;
	int err;

	GEM_BUG_ON(!i915->wopcm.size);
	GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw));

	err = i915_inject_probe_error(i915, -ENXIO);
	if (err)
		return err;

	__force_fw_fetch_failures(uc_fw, -EINVAL);
	__force_fw_fetch_failures(uc_fw, -ESTALE);

	err = request_firmware(&fw, uc_fw->path, dev);
	if (err)
		goto fail;

	/* Check the size of the blob before examining buffer contents */
	if (unlikely(fw->size < sizeof(struct uc_css_header))) {
		dev_warn(dev, "%s firmware %s: invalid size: %zu < %zu\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 fw->size, sizeof(struct uc_css_header));
		err = -ENODATA;
		goto fail;
	}

	css = (struct uc_css_header *)fw->data;

	/* Check integrity of size values inside CSS header */
	size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw -
		css->exponent_size_dw) * sizeof(u32);
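	/*
	 * header_size_dw covers the whole CSS header, including the key,
	 * modulus and exponent whose sizes are given by their own fields;
	 * whatever remains must match our fixed uc_css_header layout.
	 */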
	if (unlikely(size != sizeof(struct uc_css_header))) {
		dev_warn(dev,
			 "%s firmware %s: unexpected header size: %zu != %zu\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 size, sizeof(struct uc_css_header));
		err = -EPROTO;
		goto fail;
	}

	/* uCode size must be calculated from the other sizes */
	uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);

	/* now RSA */
	if (unlikely(css->key_size_dw != UOS_RSA_SCRATCH_COUNT)) {
		dev_warn(dev, "%s firmware %s: unexpected key size: %u != %u\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 css->key_size_dw, UOS_RSA_SCRATCH_COUNT);
		err = -EPROTO;
		goto fail;
	}
	uc_fw->rsa_size = css->key_size_dw * sizeof(u32);

	/* The blob must at least contain the header, uCode and RSA key */
	size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size;
	if (unlikely(fw->size < size)) {
		dev_warn(dev, "%s firmware %s: invalid size: %zu < %zu\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 fw->size, size);
		err = -ENOEXEC;
		goto fail;
	}

	/* Sanity check that the fw upload size fits in the WOPCM memory */
	size = __intel_uc_fw_get_upload_size(uc_fw);
	if (unlikely(size >= i915->wopcm.size)) {
		dev_warn(dev, "%s firmware %s: invalid size: %zu > %zu\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 size, (size_t)i915->wopcm.size);
		err = -E2BIG;
		goto fail;
	}

	/* Get version numbers from the CSS header */
	uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MAJOR,
					   css->sw_version);
	uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MINOR,
					   css->sw_version);

	if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
	    uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
		dev_notice(dev, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
			   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			   uc_fw->major_ver_found, uc_fw->minor_ver_found,
			   uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
		if (!intel_uc_fw_is_overridden(uc_fw)) {
			err = -ENOEXEC;
			goto fail;
		}
	}

	obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto fail;
	}

	uc_fw->obj = obj;
	uc_fw->size = fw->size;
	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);

	release_firmware(fw);
	return 0;

fail:
	intel_uc_fw_change_status(uc_fw, err == -ENOENT ?
				  INTEL_UC_FIRMWARE_MISSING :
				  INTEL_UC_FIRMWARE_ERROR);

	dev_notice(dev, "%s firmware %s: fetch failed with error %d\n",
		   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
	dev_info(dev, "%s firmware(s) can be downloaded from %s\n",
		 intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);

	release_firmware(fw);		/* OK even if fw is NULL */
	return err;
}

static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, struct i915_ggtt *ggtt)
{
	struct drm_mm_node *node = &ggtt->uc_fw;

	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(upper_32_bits(node->start));
	GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));

	return lower_32_bits(node->start);
}

static void intel_uc_fw_ggtt_bind(struct intel_uc_fw *uc_fw,
				  struct intel_gt *gt)
{
	struct drm_i915_gem_object *obj = uc_fw->obj;
	struct i915_ggtt *ggtt = gt->ggtt;
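	/*
	 * Fake, stack-local vma: just enough for insert_entries() to map the
	 * firmware object's pages at the GGTT node reserved for uC firmware.
	 */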
	struct i915_vma dummy = {
		.node.start = uc_fw_ggtt_offset(uc_fw, ggtt),
		.node.size = obj->base.size,
		.pages = obj->mm.pages,
		.vm = &ggtt->vm,
	};

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(dummy.node.size > ggtt->uc_fw.size);

	/* uc_fw->obj cache domains were not controlled across suspend */
	drm_clflush_sg(dummy.pages);

	ggtt->vm.insert_entries(&ggtt->vm, &dummy, I915_CACHE_NONE, 0);
}

static void intel_uc_fw_ggtt_unbind(struct intel_uc_fw *uc_fw,
				    struct intel_gt *gt)
{
	struct drm_i915_gem_object *obj = uc_fw->obj;
	struct i915_ggtt *ggtt = gt->ggtt;
	u64 start = uc_fw_ggtt_offset(uc_fw, ggtt);

	ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size);
}

static int uc_fw_xfer(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
		      u32 wopcm_offset, u32 dma_flags)
{
	struct intel_uncore *uncore = gt->uncore;
	u64 offset;
	int ret;

	ret = i915_inject_probe_error(gt->i915, -ETIMEDOUT);
	if (ret)
		return ret;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	/* Set the source address for the uCode */
	offset = uc_fw_ggtt_offset(uc_fw, gt->ggtt);
	GEM_BUG_ON(upper_32_bits(offset) & 0xFFFF0000);
	intel_uncore_write_fw(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset));
	intel_uncore_write_fw(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset));

	/* Set the DMA destination */
	intel_uncore_write_fw(uncore, DMA_ADDR_1_LOW, wopcm_offset);
	intel_uncore_write_fw(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);

	/*
	 * Set the transfer size. The header plus uCode will be copied to WOPCM
	 * via DMA, excluding any other components
	 */
	intel_uncore_write_fw(uncore, DMA_COPY_SIZE,
			      sizeof(struct uc_css_header) + uc_fw->ucode_size);

	/* Start the DMA */
	intel_uncore_write_fw(uncore, DMA_CTRL,
			      _MASKED_BIT_ENABLE(dma_flags | START_DMA));

	/* Wait for DMA to finish */
	ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
	if (ret)
		dev_err(gt->i915->drm.dev, "DMA for %s fw failed, DMA_CTRL=%u\n",
			intel_uc_fw_type_repr(uc_fw->type),
			intel_uncore_read_fw(uncore, DMA_CTRL));

	/* Disable the bits once DMA is over */
	intel_uncore_write_fw(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

	return ret;
}

/**
 * intel_uc_fw_upload - load uC firmware using custom loader
 * @uc_fw: uC firmware
 * @gt: the intel_gt structure
 * @wopcm_offset: destination offset in wopcm
 * @dma_flags: flags for dma ctrl
 *
 * Loads uC firmware and updates internal flags.
 *
 * Return: 0 on success, non-zero on failure.
 */
int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
		       u32 wopcm_offset, u32 dma_flags)
{
	int err;

	/* make sure the status was cleared the last time we reset the uc */
	GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));

	err = i915_inject_probe_error(gt->i915, -ENOEXEC);
	if (err)
		return err;

	if (!intel_uc_fw_is_available(uc_fw))
		return -ENOEXEC;

	/* Call custom loader */
	intel_uc_fw_ggtt_bind(uc_fw, gt);
	err = uc_fw_xfer(uc_fw, gt, wopcm_offset, dma_flags);
	intel_uc_fw_ggtt_unbind(uc_fw, gt);
	if (err)
		goto fail;

	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
	return 0;

fail:
	i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 err);
	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_FAIL);
	return err;
}

int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
{
	int err;

	/* this should happen before the load! */
	GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));

	if (!intel_uc_fw_is_available(uc_fw))
		return -ENOEXEC;

	err = i915_gem_object_pin_pages(uc_fw->obj);
	if (err) {
		DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
				 intel_uc_fw_type_repr(uc_fw->type), err);
		intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_FAIL);
	}

	return err;
}

void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
{
	if (!intel_uc_fw_is_available(uc_fw))
		return;

	i915_gem_object_unpin_pages(uc_fw->obj);
}

/**
 * intel_uc_fw_cleanup_fetch - cleanup uC firmware
 * @uc_fw: uC firmware
 *
 * Cleans up uC firmware by releasing the firmware GEM obj.
 */
void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw)
{
	if (!intel_uc_fw_is_available(uc_fw))
		return;

	i915_gem_object_put(fetch_and_zero(&uc_fw->obj));

	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_SELECTED);
}

/**
 * intel_uc_fw_copy_rsa - copy fw RSA to buffer
 *
 * @uc_fw: uC firmware
 * @dst: dst buffer
 * @max_len: max number of bytes to copy
 *
 * Return: number of copied bytes.
 */
size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
{
	struct sg_table *pages = uc_fw->obj->mm.pages;
	u32 size = min_t(u32, uc_fw->rsa_size, max_len);
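	/* in the blob, the RSA signature follows the CSS header and uCode */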
	u32 offset = sizeof(struct uc_css_header) + uc_fw->ucode_size;

	GEM_BUG_ON(!intel_uc_fw_is_available(uc_fw));

	return sg_pcopy_to_buffer(pages->sgl, pages->nents, dst, size, offset);
}

/**
 * intel_uc_fw_dump - dump information about uC firmware
 * @uc_fw: uC firmware
 * @p: the &drm_printer
 *
 * Pretty printer for uC firmware.
 */
void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
{
	drm_printf(p, "%s firmware: %s\n",
		   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
	drm_printf(p, "\tstatus: %s\n",
		   intel_uc_fw_status_repr(uc_fw->status));
	drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n",
		   uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted,
		   uc_fw->major_ver_found, uc_fw->minor_ver_found);
	drm_printf(p, "\tuCode: %u bytes\n", uc_fw->ucode_size);
	drm_printf(p, "\tRSA: %u bytes\n", uc_fw->rsa_size);
}