// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2019 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/firmware.h>
#include <linux/highmem.h>

#include <drm/drm_cache.h>
#include <drm/drm_print.h>

#include "gem/i915_gem_lmem.h"
#include "intel_uc_fw.h"
#include "intel_uc_fw_abi.h"
#include "i915_drv.h"
#include "i915_reg.h"

static inline struct intel_gt *
____uc_fw_to_gt(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
{
	if (type == INTEL_UC_FW_TYPE_GUC)
		return container_of(uc_fw, struct intel_gt, uc.guc.fw);

	GEM_BUG_ON(type != INTEL_UC_FW_TYPE_HUC);
	return container_of(uc_fw, struct intel_gt, uc.huc.fw);
}

static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
{
	GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
	return ____uc_fw_to_gt(uc_fw, uc_fw->type);
}

#ifdef CONFIG_DRM_I915_DEBUG_GUC
void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
			       enum intel_uc_fw_status status)
{
	uc_fw->__status = status;
	drm_dbg(&__uc_fw_to_gt(uc_fw)->i915->drm,
		"%s firmware -> %s\n",
		intel_uc_fw_type_repr(uc_fw->type),
		status == INTEL_UC_FIRMWARE_SELECTED ?
		uc_fw->path : intel_uc_fw_status_repr(status));
}
#endif

/*
 * List of required GuC and HuC binaries per-platform.
 * Must be ordered based on platform + revid, from newer to older.
 *
 * Note that RKL and ADL-S have the same GuC/HuC device IDs and use the same
 * firmware as TGL.
 */
#define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_def) \
	fw_def(DG2,          0, guc_def(dg2,  70, 4, 1)) \
	fw_def(ALDERLAKE_P,  0, guc_def(adlp, 70, 1, 1)) \
	fw_def(ALDERLAKE_S,  0, guc_def(tgl,  70, 1, 1)) \
	fw_def(DG1,          0, guc_def(dg1,  70, 1, 1)) \
	fw_def(ROCKETLAKE,   0, guc_def(tgl,  70, 1, 1)) \
	fw_def(TIGERLAKE,    0, guc_def(tgl,  70, 1, 1)) \
	fw_def(JASPERLAKE,   0, guc_def(ehl,  70, 1, 1)) \
	fw_def(ELKHARTLAKE,  0, guc_def(ehl,  70, 1, 1)) \
	fw_def(ICELAKE,      0, guc_def(icl,  70, 1, 1)) \
	fw_def(COMETLAKE,    5, guc_def(cml,  70, 1, 1)) \
	fw_def(COMETLAKE,    0, guc_def(kbl,  70, 1, 1)) \
	fw_def(COFFEELAKE,   0, guc_def(kbl,  70, 1, 1)) \
	fw_def(GEMINILAKE,   0, guc_def(glk,  70, 1, 1)) \
	fw_def(KABYLAKE,     0, guc_def(kbl,  70, 1, 1)) \
	fw_def(BROXTON,      0, guc_def(bxt,  70, 1, 1)) \
	fw_def(SKYLAKE,      0, guc_def(skl,  70, 1, 1))

#define INTEL_GUC_FIRMWARE_DEFS_FALLBACK(fw_def, guc_def) \
	fw_def(ALDERLAKE_P,  0, guc_def(adlp, 69, 0, 3)) \
	fw_def(ALDERLAKE_S,  0, guc_def(tgl,  69, 0, 3))

#define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_def) \
	fw_def(ALDERLAKE_P,  0, huc_def(tgl,  7, 9, 3)) \
	fw_def(ALDERLAKE_S,  0, huc_def(tgl,  7, 9, 3)) \
	fw_def(DG1,          0, huc_def(dg1,  7, 9, 3)) \
	fw_def(ROCKETLAKE,   0, huc_def(tgl,  7, 9, 3)) \
	fw_def(TIGERLAKE,    0, huc_def(tgl,  7, 9, 3)) \
	fw_def(JASPERLAKE,   0, huc_def(ehl,  9, 0, 0)) \
	fw_def(ELKHARTLAKE,  0, huc_def(ehl,  9, 0, 0)) \
	fw_def(ICELAKE,      0, huc_def(icl,  9, 0, 0)) \
	fw_def(COMETLAKE,    5, huc_def(cml,  4, 0, 0)) \
	fw_def(COMETLAKE,    0, huc_def(kbl,  4, 0, 0)) \
	fw_def(COFFEELAKE,   0, huc_def(kbl,  4, 0, 0)) \
	fw_def(GEMINILAKE,   0, huc_def(glk,  4, 0, 0)) \
	fw_def(KABYLAKE,     0, huc_def(kbl,  4, 0, 0)) \
	fw_def(BROXTON,      0, huc_def(bxt,  2, 0, 0)) \
	fw_def(SKYLAKE,      0, huc_def(skl,  2, 0, 0))

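/*
 * __MAKE_UC_FW_PATH() builds the firmware file name from its parts: for
 * example, MAKE_GUC_FW_PATH(tgl, 70, 1, 1) evaluates to
 * "i915/tgl_guc_70.1.1.bin".
 */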
#define __MAKE_UC_FW_PATH(prefix_, name_, major_, minor_, patch_) \
	"i915/" \
	__stringify(prefix_) name_ \
	__stringify(major_) "." \
	__stringify(minor_) "." \
	__stringify(patch_) ".bin"

#define MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_) \
	__MAKE_UC_FW_PATH(prefix_, "_guc_", major_, minor_, patch_)

#define MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_) \
	__MAKE_UC_FW_PATH(prefix_, "_huc_", major_, minor_, bld_num_)

/* All blobs need to be declared via MODULE_FIRMWARE() */
#define INTEL_UC_MODULE_FW(platform_, revid_, uc_) \
	MODULE_FIRMWARE(uc_);

INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH)
INTEL_GUC_FIRMWARE_DEFS_FALLBACK(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH)
INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH)

/* The below structs and macros are used to iterate across the list of blobs */
struct __packed uc_fw_blob {
	u8 major;
	u8 minor;
	const char *path;
};

#define UC_FW_BLOB(major_, minor_, path_) \
	{ .major = major_, .minor = minor_, .path = path_ }

#define GUC_FW_BLOB(prefix_, major_, minor_, patch_) \
	UC_FW_BLOB(major_, minor_, \
		   MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_))

#define HUC_FW_BLOB(prefix_, major_, minor_, bld_num_) \
	UC_FW_BLOB(major_, minor_, \
		   MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_))

struct __packed uc_fw_platform_requirement {
	enum intel_platform p;
	u8 rev; /* first platform rev using this FW */
	const struct uc_fw_blob blob;
};

#define MAKE_FW_LIST(platform_, revid_, uc_) \
{ \
	.p = INTEL_##platform_, \
	.rev = revid_, \
	.blob = uc_, \
},

struct fw_blobs_by_type {
	const struct uc_fw_platform_requirement *blobs;
	u32 count;
};

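/*
 * Select the blob for the running platform: walk the per-platform list
 * (ordered from newer to older) and take the first entry whose platform
 * matches and whose minimum revid is satisfied. For the GuC, an older
 * fallback blob is also recorded in case the preferred one is not installed.
 */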
static void
__uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
{
	static const struct uc_fw_platform_requirement blobs_guc[] = {
		INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB)
	};
	static const struct uc_fw_platform_requirement blobs_guc_fallback[] = {
		INTEL_GUC_FIRMWARE_DEFS_FALLBACK(MAKE_FW_LIST, GUC_FW_BLOB)
	};
	static const struct uc_fw_platform_requirement blobs_huc[] = {
		INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB)
	};
	static const struct fw_blobs_by_type blobs_all[INTEL_UC_FW_NUM_TYPES] = {
		[INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) },
		[INTEL_UC_FW_TYPE_HUC] = { blobs_huc, ARRAY_SIZE(blobs_huc) },
	};
	const struct uc_fw_platform_requirement *fw_blobs;
	enum intel_platform p = INTEL_INFO(i915)->platform;
	u32 fw_count;
	u8 rev = INTEL_REVID(i915);
	int i;

	/*
	 * The only difference between the ADL GuC FWs is the HWConfig support.
	 * ADL-N does not support HWConfig, so we should use the same binary as
	 * ADL-S, otherwise the GuC might attempt to fetch a config table that
	 * does not exist.
	 */
	if (IS_ADLP_N(i915))
		p = INTEL_ALDERLAKE_S;

	GEM_BUG_ON(uc_fw->type >= ARRAY_SIZE(blobs_all));
	fw_blobs = blobs_all[uc_fw->type].blobs;
	fw_count = blobs_all[uc_fw->type].count;

	for (i = 0; i < fw_count && p <= fw_blobs[i].p; i++) {
		if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) {
			const struct uc_fw_blob *blob = &fw_blobs[i].blob;

			uc_fw->path = blob->path;
			uc_fw->wanted_path = blob->path;
			uc_fw->major_ver_wanted = blob->major;
			uc_fw->minor_ver_wanted = blob->minor;
			break;
		}
	}

	if (uc_fw->type == INTEL_UC_FW_TYPE_GUC) {
		const struct uc_fw_platform_requirement *blobs = blobs_guc_fallback;
		u32 count = ARRAY_SIZE(blobs_guc_fallback);

		for (i = 0; i < count && p <= blobs[i].p; i++) {
			if (p == blobs[i].p && rev >= blobs[i].rev) {
				const struct uc_fw_blob *blob = &blobs[i].blob;

				uc_fw->fallback.path = blob->path;
				uc_fw->fallback.major_ver = blob->major;
				uc_fw->fallback.minor_ver = blob->minor;
				break;
			}
		}
	}

	/* make sure the list is ordered as expected */
	if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) {
		for (i = 1; i < fw_count; i++) {
			if (fw_blobs[i].p < fw_blobs[i - 1].p)
				continue;

			if (fw_blobs[i].p == fw_blobs[i - 1].p &&
			    fw_blobs[i].rev < fw_blobs[i - 1].rev)
				continue;

			drm_err(&i915->drm, "Invalid FW blob order: %s r%u comes before %s r%u\n",
				intel_platform_name(fw_blobs[i - 1].p),
				fw_blobs[i - 1].rev,
				intel_platform_name(fw_blobs[i].p),
				fw_blobs[i].rev);

			uc_fw->path = NULL;
		}
	}
}

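/*
 * Translate the enable_guc/guc_firmware_path/huc_firmware_path module
 * parameters into an override: NULL keeps the auto-selected blob, an empty
 * string marks the firmware as disabled, and any other string names the blob
 * to fetch instead.
 */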
static const char *__override_guc_firmware_path(struct drm_i915_private *i915)
{
	if (i915->params.enable_guc & ENABLE_GUC_MASK)
		return i915->params.guc_firmware_path;
	return "";
}

static const char *__override_huc_firmware_path(struct drm_i915_private *i915)
{
	if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC)
		return i915->params.huc_firmware_path;
	return "";
}

static void __uc_fw_user_override(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
{
	const char *path = NULL;

	switch (uc_fw->type) {
	case INTEL_UC_FW_TYPE_GUC:
		path = __override_guc_firmware_path(i915);
		break;
	case INTEL_UC_FW_TYPE_HUC:
		path = __override_huc_firmware_path(i915);
		break;
	}

	if (unlikely(path)) {
		uc_fw->path = path;
		uc_fw->user_overridden = true;
	}
}

/**
 * intel_uc_fw_init_early - initialize the uC object and select the firmware
 * @uc_fw: uC firmware
 * @type: type of uC
 *
 * Initialize the state of our uC object and relevant tracking and select the
 * firmware to fetch and load.
 */
void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
			    enum intel_uc_fw_type type)
{
	struct drm_i915_private *i915 = ____uc_fw_to_gt(uc_fw, type)->i915;

	/*
	 * we use FIRMWARE_UNINITIALIZED to detect checks against uc_fw->status
	 * before we've looked at the HW caps to see if we have uC support
	 */
	BUILD_BUG_ON(INTEL_UC_FIRMWARE_UNINITIALIZED);
	GEM_BUG_ON(uc_fw->status);
	GEM_BUG_ON(uc_fw->path);

	uc_fw->type = type;

	if (HAS_GT_UC(i915)) {
		__uc_fw_auto_select(i915, uc_fw);
		__uc_fw_user_override(i915, uc_fw);
	}

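	/*
	 * NULL path: uC not supported (or no blob defined for this platform);
	 * empty path: firmware disabled via module parameter;
	 * anything else: a blob was selected and will be fetched.
	 */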
	intel_uc_fw_change_status(uc_fw, uc_fw->path ? *uc_fw->path ?
				  INTEL_UC_FIRMWARE_SELECTED :
				  INTEL_UC_FIRMWARE_DISABLED :
				  INTEL_UC_FIRMWARE_NOT_SUPPORTED);
}

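/*
 * With probe fault injection armed, each i915_inject_probe_error() call below
 * is a distinct injection point, so successive probe attempts exercise a
 * different simulated fetch failure (bad path, too-new or too-old version,
 * unsupported platform). Without fault injection this is a no-op.
 */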
static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e)
{
	struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
	bool user = e == -EINVAL;

	if (i915_inject_probe_error(i915, e)) {
		/* non-existing blob */
		uc_fw->path = "<invalid>";
		uc_fw->user_overridden = user;
	} else if (i915_inject_probe_error(i915, e)) {
		/* require next major version */
		uc_fw->major_ver_wanted += 1;
		uc_fw->minor_ver_wanted = 0;
		uc_fw->user_overridden = user;
	} else if (i915_inject_probe_error(i915, e)) {
		/* require next minor version */
		uc_fw->minor_ver_wanted += 1;
		uc_fw->user_overridden = user;
	} else if (uc_fw->major_ver_wanted &&
		   i915_inject_probe_error(i915, e)) {
		/* require prev major version */
		uc_fw->major_ver_wanted -= 1;
		uc_fw->minor_ver_wanted = 0;
		uc_fw->user_overridden = user;
	} else if (uc_fw->minor_ver_wanted &&
		   i915_inject_probe_error(i915, e)) {
		/* require prev minor version - hey, this should work! */
		uc_fw->minor_ver_wanted -= 1;
		uc_fw->user_overridden = user;
	} else if (user && i915_inject_probe_error(i915, e)) {
		/* officially unsupported platform */
		uc_fw->major_ver_wanted = 0;
		uc_fw->minor_ver_wanted = 0;
		uc_fw->user_overridden = true;
	}
}

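/*
 * Blobs loaded via the GSC carry no CSS header; the version is read from a
 * fixed dword of the GSC manifest instead.
 */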
static int check_gsc_manifest(const struct firmware *fw,
			      struct intel_uc_fw *uc_fw)
{
	u32 *dw = (u32 *)fw->data;
	u32 version = dw[HUC_GSC_VERSION_DW];

	uc_fw->major_ver_found = FIELD_GET(HUC_GSC_MAJOR_VER_MASK, version);
	uc_fw->minor_ver_found = FIELD_GET(HUC_GSC_MINOR_VER_MASK, version);

	return 0;
}

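/*
 * A CSS-based blob is laid out as CSS header, then uCode image, then RSA
 * data; the size fields in the header are expressed in dwords.
 */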
static int check_ccs_header(struct drm_i915_private *i915,
			    const struct firmware *fw,
			    struct intel_uc_fw *uc_fw)
{
	struct uc_css_header *css;
	size_t size;

	/* Check the size of the blob before examining buffer contents */
	if (unlikely(fw->size < sizeof(struct uc_css_header))) {
		drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 fw->size, sizeof(struct uc_css_header));
		return -ENODATA;
	}

	css = (struct uc_css_header *)fw->data;

	/* Check integrity of size values inside CSS header */
	size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw -
		css->exponent_size_dw) * sizeof(u32);
	if (unlikely(size != sizeof(struct uc_css_header))) {
		drm_warn(&i915->drm,
			 "%s firmware %s: unexpected header size: %zu != %zu\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 fw->size, sizeof(struct uc_css_header));
		return -EPROTO;
	}

	/* uCode size must be calculated from other sizes */
	uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);

	/* now RSA */
	uc_fw->rsa_size = css->key_size_dw * sizeof(u32);

	/* At a minimum, the blob must contain the header, the uCode and the RSA data */
	size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size;
	if (unlikely(fw->size < size)) {
		drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 fw->size, size);
		return -ENOEXEC;
	}

	/* Sanity check: the fw must not be larger than the whole WOPCM memory */
	size = __intel_uc_fw_get_upload_size(uc_fw);
	if (unlikely(size >= i915->wopcm.size)) {
		drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu > %zu\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 size, (size_t)i915->wopcm.size);
		return -E2BIG;
	}

	/* Get version numbers from the CSS header */
	uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MAJOR,
					   css->sw_version);
	uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MINOR,
					   css->sw_version);

	if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
		uc_fw->private_data_size = css->private_data_size;

	return 0;
}

/**
 * intel_uc_fw_fetch - fetch uC firmware
 * @uc_fw: uC firmware
 *
 * Fetch uC firmware into GEM obj.
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
{
	struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
	struct device *dev = i915->drm.dev;
	struct drm_i915_gem_object *obj;
	const struct firmware *fw = NULL;
	int err;

	GEM_BUG_ON(!i915->wopcm.size);
	GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw));

	err = i915_inject_probe_error(i915, -ENXIO);
	if (err)
		goto fail;

	__force_fw_fetch_failures(uc_fw, -EINVAL);
	__force_fw_fetch_failures(uc_fw, -ESTALE);

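	/*
	 * Try the preferred blob first. If it is missing and the user did not
	 * force a specific path, fall back to the older GuC blob recorded by
	 * __uc_fw_auto_select() and adjust the wanted version to match it.
	 */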
	err = firmware_request_nowarn(&fw, uc_fw->path, dev);
	if (err && !intel_uc_fw_is_overridden(uc_fw) && uc_fw->fallback.path) {
		err = firmware_request_nowarn(&fw, uc_fw->fallback.path, dev);
		if (!err) {
			drm_notice(&i915->drm,
				   "%s firmware %s is recommended, but only %s was found\n",
				   intel_uc_fw_type_repr(uc_fw->type),
				   uc_fw->wanted_path,
				   uc_fw->fallback.path);
			drm_info(&i915->drm,
				 "Consider updating your linux-firmware pkg or downloading from %s\n",
				 INTEL_UC_FIRMWARE_URL);

			uc_fw->path = uc_fw->fallback.path;
			uc_fw->major_ver_wanted = uc_fw->fallback.major_ver;
			uc_fw->minor_ver_wanted = uc_fw->fallback.minor_ver;
		}
	}
	if (err)
		goto fail;

	if (uc_fw->loaded_via_gsc)
		err = check_gsc_manifest(fw, uc_fw);
	else
		err = check_ccs_header(i915, fw, uc_fw);
	if (err)
		goto fail;

	if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
	    uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
		drm_notice(&i915->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
			   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			   uc_fw->major_ver_found, uc_fw->minor_ver_found,
			   uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
		if (!intel_uc_fw_is_overridden(uc_fw)) {
			err = -ENOEXEC;
			goto fail;
		}
	}

	if (HAS_LMEM(i915)) {
		obj = i915_gem_object_create_lmem_from_data(i915, fw->data, fw->size);
		if (!IS_ERR(obj))
			obj->flags |= I915_BO_ALLOC_PM_EARLY;
	} else {
		obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
	}

	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto fail;
	}

	uc_fw->obj = obj;
	uc_fw->size = fw->size;
	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);

	release_firmware(fw);
	return 0;

fail:
	intel_uc_fw_change_status(uc_fw, err == -ENOENT ?
				  INTEL_UC_FIRMWARE_MISSING :
				  INTEL_UC_FIRMWARE_ERROR);

	i915_probe_error(i915, "%s firmware %s: fetch failed with error %d\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
	drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n",
		 intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);

	release_firmware(fw);		/* OK even if fw is NULL */
	return err;
}

static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw)
{
	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
	struct drm_mm_node *node = &ggtt->uc_fw;

	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(upper_32_bits(node->start));
	GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));

	return lower_32_bits(node->start);
}

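/*
 * Map the firmware object at the GGTT node reserved for uC firmware: the PTEs
 * are written directly through a dummy i915_vma_resource (no VMA is created),
 * with CPU caches flushed first, so the DMA engine can read the blob from
 * that fixed GGTT offset.
 */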
static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
{
	struct drm_i915_gem_object *obj = uc_fw->obj;
	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
	struct i915_vma_resource *dummy = &uc_fw->dummy;
	u32 pte_flags = 0;

	dummy->start = uc_fw_ggtt_offset(uc_fw);
	dummy->node_size = obj->base.size;
	dummy->bi.pages = obj->mm.pages;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(dummy->node_size > ggtt->uc_fw.size);

	/* uc_fw->obj cache domains were not controlled across suspend */
	if (i915_gem_object_has_struct_page(obj))
		drm_clflush_sg(dummy->bi.pages);

	if (i915_gem_object_is_lmem(obj))
		pte_flags |= PTE_LM;

	if (ggtt->vm.raw_insert_entries)
		ggtt->vm.raw_insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
	else
		ggtt->vm.insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
}

static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw)
{
	struct drm_i915_gem_object *obj = uc_fw->obj;
	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
	u64 start = uc_fw_ggtt_offset(uc_fw);

	ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size);
}

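/*
 * Copy the CSS header plus uCode from the firmware's GGTT offset into WOPCM
 * using the DMA engine, then wait for the transfer to complete. Forcewake is
 * held and the raw _fw register accessors are used throughout.
 */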
static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
{
	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
	struct intel_uncore *uncore = gt->uncore;
	u64 offset;
	int ret;

	ret = i915_inject_probe_error(gt->i915, -ETIMEDOUT);
	if (ret)
		return ret;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	/* Set the source address for the uCode */
	offset = uc_fw_ggtt_offset(uc_fw);
	GEM_BUG_ON(upper_32_bits(offset) & 0xFFFF0000);
	intel_uncore_write_fw(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset));
	intel_uncore_write_fw(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset));

	/* Set the DMA destination */
	intel_uncore_write_fw(uncore, DMA_ADDR_1_LOW, dst_offset);
	intel_uncore_write_fw(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);

	/*
	 * Set the transfer size. The header plus uCode will be copied to WOPCM
	 * via DMA, excluding any other components
	 */
	intel_uncore_write_fw(uncore, DMA_COPY_SIZE,
			      sizeof(struct uc_css_header) + uc_fw->ucode_size);

	/* Start the DMA */
	intel_uncore_write_fw(uncore, DMA_CTRL,
			      _MASKED_BIT_ENABLE(dma_flags | START_DMA));

	/* Wait for DMA to finish */
	ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
	if (ret)
		drm_err(&gt->i915->drm, "DMA for %s fw failed, DMA_CTRL=%u\n",
			intel_uc_fw_type_repr(uc_fw->type),
			intel_uncore_read_fw(uncore, DMA_CTRL));

	/* Disable the bits once DMA is over */
	intel_uncore_write_fw(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

	return ret;
}

/**
 * intel_uc_fw_upload - load uC firmware using custom loader
 * @uc_fw: uC firmware
 * @dst_offset: destination offset
 * @dma_flags: flags for DMA ctrl
 *
 * Loads uC firmware and updates internal flags.
 *
 * Return: 0 on success, non-zero on failure.
 */
int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
{
	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
	int err;

	/* make sure the status was cleared the last time we reset the uc */
	GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));

	err = i915_inject_probe_error(gt->i915, -ENOEXEC);
	if (err)
		return err;

	if (!intel_uc_fw_is_loadable(uc_fw))
		return -ENOEXEC;

	/* Call custom loader */
	uc_fw_bind_ggtt(uc_fw);
	err = uc_fw_xfer(uc_fw, dst_offset, dma_flags);
	uc_fw_unbind_ggtt(uc_fw);
	if (err)
		goto fail;

	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
	return 0;

fail:
	i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 err);
	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
	return err;
}

static inline bool uc_fw_need_rsa_in_memory(struct intel_uc_fw *uc_fw)
{
	/*
	 * The HW reads the GuC RSA from memory if the key size is > 256 bytes,
	 * while it reads it from the 64 RSA registers if it is smaller.
	 * The HuC RSA is always read from memory.
	 */
	return uc_fw->type == INTEL_UC_FW_TYPE_HUC || uc_fw->rsa_size > 256;
}

static int uc_fw_rsa_data_create(struct intel_uc_fw *uc_fw)
{
	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
	struct i915_vma *vma;
	size_t copied;
	void *vaddr;
	int err;

	err = i915_inject_probe_error(gt->i915, -ENXIO);
	if (err)
		return err;

	if (!uc_fw_need_rsa_in_memory(uc_fw))
		return 0;

	/*
	 * uC firmwares will sit above GUC_GGTT_TOP and will not map through
	 * GGTT. Unfortunately, this means that the GuC HW cannot perform the uC
	 * authentication from memory, as the RSA offset now falls within the
	 * GuC inaccessible range. We resort to perma-pinning an additional vma
	 * within the accessible range that only contains the RSA signature.
	 * The GuC HW can use this extra pinning to perform the authentication
	 * since its GGTT offset will be GuC accessible.
	 */
	GEM_BUG_ON(uc_fw->rsa_size > PAGE_SIZE);
	vma = intel_guc_allocate_vma(&gt->uc.guc, PAGE_SIZE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
						 i915_coherent_map_type(gt->i915, vma->obj, true));
	if (IS_ERR(vaddr)) {
		i915_vma_unpin_and_release(&vma, 0);
		err = PTR_ERR(vaddr);
		goto unpin_out;
	}

	copied = intel_uc_fw_copy_rsa(uc_fw, vaddr, vma->size);
	i915_gem_object_unpin_map(vma->obj);

	if (copied < uc_fw->rsa_size) {
		err = -ENOMEM;
		goto unpin_out;
	}

	uc_fw->rsa_data = vma;

	return 0;

unpin_out:
	i915_vma_unpin_and_release(&vma, 0);
	return err;
}

static void uc_fw_rsa_data_destroy(struct intel_uc_fw *uc_fw)
{
	i915_vma_unpin_and_release(&uc_fw->rsa_data, 0);
}

int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
{
	int err;

	/* this should happen before the load! */
	GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));

	if (!intel_uc_fw_is_available(uc_fw))
		return -ENOEXEC;

	err = i915_gem_object_pin_pages_unlocked(uc_fw->obj);
	if (err) {
		DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
				 intel_uc_fw_type_repr(uc_fw->type), err);
		goto out;
	}

	err = uc_fw_rsa_data_create(uc_fw);
	if (err) {
		DRM_DEBUG_DRIVER("%s fw rsa data creation failed, err=%d\n",
				 intel_uc_fw_type_repr(uc_fw->type), err);
		goto out_unpin;
	}

	return 0;

out_unpin:
	i915_gem_object_unpin_pages(uc_fw->obj);
out:
	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_INIT_FAIL);
	return err;
}

void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
{
	uc_fw_rsa_data_destroy(uc_fw);

	if (i915_gem_object_has_pinned_pages(uc_fw->obj))
		i915_gem_object_unpin_pages(uc_fw->obj);

	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
}

/**
 * intel_uc_fw_cleanup_fetch - cleanup uC firmware
 * @uc_fw: uC firmware
 *
 * Cleans up uC firmware by releasing the firmware GEM obj.
 */
void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw)
{
	if (!intel_uc_fw_is_available(uc_fw))
		return;

	i915_gem_object_put(fetch_and_zero(&uc_fw->obj));

	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_SELECTED);
}

/**
 * intel_uc_fw_copy_rsa - copy fw RSA to buffer
 *
 * @uc_fw: uC firmware
 * @dst: dst buffer
 * @max_len: max number of bytes to copy
 *
 * Return: number of copied bytes.
 */
size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
{
	struct intel_memory_region *mr = uc_fw->obj->mm.region;
	u32 size = min_t(u32, uc_fw->rsa_size, max_len);
	u32 offset = sizeof(struct uc_css_header) + uc_fw->ucode_size;
	struct sgt_iter iter;
	size_t count = 0;
	int idx;

	/* Called during reset handling, must be atomic [no fs_reclaim] */
	GEM_BUG_ON(!intel_uc_fw_is_available(uc_fw));

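	/*
	 * The RSA data follows the CSS header and uCode in the blob. Walk the
	 * backing store page by page: kmap for shmem-backed objects, an atomic
	 * WC io-mapping for objects in local memory.
	 */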
	idx = offset >> PAGE_SHIFT;
	offset = offset_in_page(offset);
	if (i915_gem_object_has_struct_page(uc_fw->obj)) {
		struct page *page;

		for_each_sgt_page(page, iter, uc_fw->obj->mm.pages) {
			u32 len = min_t(u32, size, PAGE_SIZE - offset);
			void *vaddr;

			if (idx > 0) {
				idx--;
				continue;
			}

			vaddr = kmap_atomic(page);
			memcpy(dst, vaddr + offset, len);
			kunmap_atomic(vaddr);

			offset = 0;
			dst += len;
			size -= len;
			count += len;
			if (!size)
				break;
		}
	} else {
		dma_addr_t addr;

		for_each_sgt_daddr(addr, iter, uc_fw->obj->mm.pages) {
			u32 len = min_t(u32, size, PAGE_SIZE - offset);
			void __iomem *vaddr;

			if (idx > 0) {
				idx--;
				continue;
			}

			vaddr = io_mapping_map_atomic_wc(&mr->iomap,
							 addr - mr->region.start);
			memcpy_fromio(dst, vaddr + offset, len);
			io_mapping_unmap_atomic(vaddr);

			offset = 0;
			dst += len;
			size -= len;
			count += len;
			if (!size)
				break;
		}
	}

	return count;
}

/**
 * intel_uc_fw_dump - dump information about uC firmware
 * @uc_fw: uC firmware
 * @p: the &drm_printer
 *
 * Pretty printer for uC firmware.
 */
void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
{
	drm_printf(p, "%s firmware: %s\n",
		   intel_uc_fw_type_repr(uc_fw->type), uc_fw->wanted_path);
	if (uc_fw->fallback.path) {
		drm_printf(p, "%s firmware fallback: %s\n",
			   intel_uc_fw_type_repr(uc_fw->type), uc_fw->fallback.path);
		drm_printf(p, "fallback selected: %s\n",
			   str_yes_no(uc_fw->path == uc_fw->fallback.path));
	}
	drm_printf(p, "\tstatus: %s\n",
		   intel_uc_fw_status_repr(uc_fw->status));
	drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n",
		   uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted,
		   uc_fw->major_ver_found, uc_fw->minor_ver_found);
	drm_printf(p, "\tuCode: %u bytes\n", uc_fw->ucode_size);
	drm_printf(p, "\tRSA: %u bytes\n", uc_fw->rsa_size);
}