/*
 *  AMD CPU Microcode Update Driver for Linux
 *
 *  This driver allows to upgrade microcode on F10h AMD
 *  CPUs and later.
 *
 *  Copyright (C) 2008-2011 Advanced Micro Devices Inc.
 *	          2013-2016 Borislav Petkov <bp@alien8.de>
 *
 *  Author: Peter Oruba <peter.oruba@amd.com>
 *
 *  Based on work by:
 *  Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
 *
 *  early loader:
 *  Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 *  Author: Jacob Shin <jacob.shin@amd.com>
 *  Fixes: Borislav Petkov <bp@suse.de>
 *
 *  Licensed under the terms of the GNU General Public
 *  License version 2. See file COPYING for details.
 */
24
#define pr_fmt(fmt) "microcode: " fmt
25

26
#include <linux/earlycpio.h>
I
Ingo Molnar 已提交
27 28 29
#include <linux/firmware.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
30
#include <linux/initrd.h>
I
Ingo Molnar 已提交
31
#include <linux/kernel.h>
32 33
#include <linux/pci.h>

34
#include <asm/microcode_amd.h>
35
#include <asm/microcode.h>
I
Ingo Molnar 已提交
36
#include <asm/processor.h>
37 38
#include <asm/setup.h>
#include <asm/cpu.h>
I
Ingo Molnar 已提交
39
#include <asm/msr.h>
40

D
Dmitry Adamushko 已提交
41
/* vmalloc'ed copy of the currently-installed container's equivalence table. */
static struct equiv_cpu_entry *equiv_cpu_table;

/*
 * This points to the current valid container of microcode patches which we will
 * save from the initrd/builtin before jettisoning its contents. @mc is the
 * microcode patch we found to match.
 */
struct cont_desc {
	struct microcode_amd *mc;	/* matching patch inside @data, if any */
	u32		     cpuid_1_eax;	/* CPUID(1).EAX of the CPU we match for */
	u32		     psize;	/* size of the matching patch */
	u16		     eq_id;	/* equivalence ID found for this CPU */
	u8		     *data;	/* start of the proper container */
	size_t		     size;	/* size of that container */
};

/* Patch level applied during early loading; 0 if none was applied. */
static u32 ucode_new_rev;
/* BSP's matching patch, saved so that APs/resume can re-apply it. */
static u8 amd_ucode_patch[PATCH_MAX_SIZE];

/*
 * Microcode patch container file is prepended to the initrd in cpio
 * format. See Documentation/x86/early-microcode.txt
 */
static const char
ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";
66

67
/*
 * Look up @sig (CPUID(1).EAX) in @equiv_table and return the matching
 * equivalence ID, or 0 when the table is absent or has no entry for it.
 */
static u16 find_equiv_id(struct equiv_cpu_entry *equiv_table, u32 sig)
{
	struct equiv_cpu_entry *e = equiv_table;

	while (e && e->installed_cpu) {
		if (e->installed_cpu == sig)
			return e->equiv_cpu;
		e++;
	}

	return 0;
}

77
/*
 * This scans the ucode blob for the proper container as we can have multiple
 * containers glued together. Returns the equivalence ID from the equivalence
 * table or 0 if none found.
 * Returns the amount of bytes consumed while scanning. @desc contains all the
 * data we're going to use in later stages of the application.
 */
static ssize_t parse_container(u8 *ucode, ssize_t size, struct cont_desc *desc)
{
	struct equiv_cpu_entry *eq;
	ssize_t orig_size = size;
	u32 *hdr = (u32 *)ucode;
	u16 eq_id;
	u8 *buf;

	/* Am I looking at an equivalence table header? */
	if (hdr[0] != UCODE_MAGIC ||
	    hdr[1] != UCODE_EQUIV_CPU_TABLE_TYPE ||
	    hdr[2] == 0) {
		/* Not a container start: skip just the header and resync. */
		desc->eq_id = 0;
		return CONTAINER_HDR_SZ;
	}

	buf = ucode;

	/* Equivalence table immediately follows the container header. */
	eq = (struct equiv_cpu_entry *)(buf + CONTAINER_HDR_SZ);

	/* Find the equivalence ID of our CPU in this table: */
	eq_id = find_equiv_id(eq, desc->cpuid_1_eax);

	/* hdr[2] is the equivalence table size in bytes. */
	buf  += hdr[2] + CONTAINER_HDR_SZ;
	size -= hdr[2] + CONTAINER_HDR_SZ;

	/*
	 * Scan through the rest of the container to find where it ends. We do
	 * some basic sanity-checking too.
	 */
	while (size > 0) {
		struct microcode_amd *mc;
		u32 patch_size;

		hdr = (u32 *)buf;

		if (hdr[0] != UCODE_UCODE_TYPE)
			break;

		/* Sanity-check patch size. */
		patch_size = hdr[1];
		if (patch_size > PATCH_MAX_SIZE)
			break;

		/* Skip patch section header: */
		buf  += SECTION_HDR_SIZE;
		size -= SECTION_HDR_SIZE;

		/* Remember the patch whose revision ID matches our CPU. */
		mc = (struct microcode_amd *)buf;
		if (eq_id == mc->hdr.processor_rev_id) {
			desc->psize = patch_size;
			desc->mc = mc;
		}

		buf  += patch_size;
		size -= patch_size;
	}

	/*
	 * If we have found a patch (desc->mc), it means we're looking at the
	 * container which has a patch for this CPU so return 0 to mean, @ucode
	 * already points to the proper container. Otherwise, we return the size
	 * we scanned so that we can advance to the next container in the
	 * buffer.
	 */
	if (desc->mc) {
		desc->eq_id = eq_id;
		desc->data  = ucode;
		desc->size  = orig_size - size;

		return 0;
	}

	return orig_size - size;
}

/*
 * Walk the ucode blob, which may consist of several containers glued
 * together, until parse_container() reports (by returning 0) that it
 * found the container matching this CPU, or until the blob is exhausted.
 */
static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
{
	ssize_t left = size;

	for (;;) {
		ssize_t skipped;

		if (left < 0)
			return;

		skipped = parse_container(ucode, left, desc);
		if (!skipped)
			return;

		ucode += skipped;
		left  -= skipped;
	}
}

178
/*
 * Hand the patch body to the CPU via the PATCH_LOADER MSR, then read the
 * PATCH_LEVEL MSR back to confirm the update took effect.
 *
 * Returns 0 on success, -1 if the CPU still reports a different level.
 */
static int __apply_microcode_amd(struct microcode_amd *mc)
{
	u32 rev, dummy;

	native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc->hdr.data_code);

	/* verify patch application was successful */
	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
	if (rev != mc->hdr.patch_id)
		return -1;

	return 0;
}

/*
 * Early load occurs before we can vmalloc(). So we look for the microcode
 * patch container file in initrd, traverse equivalent cpu table, look for a
 * matching microcode patch, and update, all in initrd memory in place.
 * When vmalloc() is available for use later -- on 64-bit during first AP load,
 * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
 * load_microcode_amd() to save equivalent cpu table and microcode patches in
 * kernel heap memory.
 *
 * Returns true if container found (sets @desc), false otherwise.
 */
static bool
apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_patch)
{
	struct cont_desc desc = { 0 };
	u8 (*patch)[PATCH_MAX_SIZE];
	struct microcode_amd *mc;
	u32 rev, dummy, *new_rev;
	bool ret = false;

#ifdef CONFIG_X86_32
	/* 32-bit runs here before paging is up: use physical addresses. */
	new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
	patch	= (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
#else
	new_rev = &ucode_new_rev;
	patch	= &amd_ucode_patch;
#endif

	desc.cpuid_1_eax = cpuid_1_eax;

	scan_containers(ucode, size, &desc);
	if (!desc.eq_id)
		return ret;

	mc = desc.mc;
	if (!mc)
		return ret;

	/* Nothing to do if the CPU already runs this level or newer. */
	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
	if (rev >= mc->hdr.patch_id)
		return ret;

	if (!__apply_microcode_amd(mc)) {
		*new_rev = mc->hdr.patch_id;
		ret      = true;

		/* Stash the patch so APs/resume can re-apply it later. */
		if (save_patch)
			memcpy(patch, mc, min_t(u32, desc.psize, PATCH_MAX_SIZE));
	}

	return ret;
}

245
/*
 * Look up builtin microcode for @family and fill in @cp on success.
 * 64-bit only; 32-bit kernels never carry builtin AMD microcode.
 */
static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
{
#ifdef CONFIG_X86_64
	char fw_name[36] = "amd-ucode/microcode_amd.bin";

	/* Families 15h+ ship per-family firmware files. */
	if (family >= 0x15)
		snprintf(fw_name, sizeof(fw_name),
			 "amd-ucode/microcode_amd_fam%.2xh.bin", family);

	return get_builtin_firmware(cp, fw_name);
#else
	return false;
#endif
}

260
/*
 * Locate a microcode container for this CPU -- builtin firmware first,
 * then the initrd -- and hand the resulting cpio blob back via @ret.
 * Must work before paging is set up, hence the physical-address dance
 * on 32-bit.
 */
void __load_ucode_amd(unsigned int cpuid_1_eax, struct cpio_data *ret)
{
	struct ucode_cpu_info *uci;
	struct cpio_data cp;
	const char *path;
	bool use_pa;

	if (IS_ENABLED(CONFIG_X86_32)) {
		/* Translate global addresses: we run physical here. */
		uci	= (struct ucode_cpu_info *)__pa_nodebug(ucode_cpu_info);
		path	= (const char *)__pa_nodebug(ucode_path);
		use_pa	= true;
	} else {
		uci     = ucode_cpu_info;
		path	= ucode_path;
		use_pa	= false;
	}

	if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
		cp = find_microcode_in_initrd(path, use_pa);

	/* Needed in load_microcode_amd() */
	uci->cpu_sig.sig = cpuid_1_eax;

	*ret = cp;
}

286
/*
 * BSP early-load entry point: find a container and apply the matching
 * patch, saving it for the APs to use later.
 */
void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
{
	struct cpio_data blob = { };

	__load_ucode_amd(cpuid_1_eax, &blob);

	if (blob.data && blob.size)
		apply_microcode_early_amd(cpuid_1_eax, blob.data, blob.size, true);
}
296

297
/*
 * AP early-load entry point. First try the patch the BSP stashed in
 * amd_ucode_patch; only if there is none, or it does not raise this
 * CPU's level, rescan the builtin/initrd container.
 */
void load_ucode_amd_ap(unsigned int cpuid_1_eax)
{
	struct microcode_amd *mc;
	struct cpio_data cp;
	u32 *new_rev, rev, dummy;

	if (IS_ENABLED(CONFIG_X86_32)) {
		/* Running physical on 32-bit: translate the globals' addresses. */
		mc	= (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
		new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
	} else {
		mc	= (struct microcode_amd *)amd_ucode_patch;
		new_rev = &ucode_new_rev;
	}

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

	/* Check whether we have saved a new patch already: */
	if (*new_rev && rev < mc->hdr.patch_id) {
		if (!__apply_microcode_amd(mc)) {
			*new_rev = mc->hdr.patch_id;
			return;
		}
	}

	/* Saved patch absent or didn't help: fall back to a full scan. */
	__load_ucode_amd(cpuid_1_eax, &cp);
	if (!(cp.data && cp.size))
		return;

	apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false);
}

328 329 330
static enum ucode_state
load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);

/*
 * Called once vmalloc() works: re-find the container in the initrd and
 * stash its equivalence table and patches in kernel heap before the
 * initrd contents are jettisoned.
 *
 * Returns 0 on success, -EINVAL if no usable container/patch was found.
 */
int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
{
	struct cont_desc desc = { 0 };
	enum ucode_state ret;
	struct cpio_data cp;

	cp = find_microcode_in_initrd(ucode_path, false);
	if (!(cp.data && cp.size))
		return -EINVAL;

	desc.cpuid_1_eax = cpuid_1_eax;

	scan_containers(cp.data, cp.size, &desc);
	if (!desc.eq_id)
		return -EINVAL;

	ret = load_microcode_amd(smp_processor_id(), x86_family(cpuid_1_eax),
				 desc.data, desc.size);
	if (ret != UCODE_OK)
		return -EINVAL;

	return 0;
}

void reload_ucode_amd(void)
{
	struct microcode_amd *mc;
358
	u32 rev, dummy;
359 360

	mc = (struct microcode_amd *)amd_ucode_patch;
361 362
	if (!mc)
		return;
363

364 365
	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

366
	if (rev < mc->hdr.patch_id) {
367 368
		if (!__apply_microcode_amd(mc)) {
			ucode_new_rev = mc->hdr.patch_id;
369
			pr_info("reload patch_level=0x%08x\n", ucode_new_rev);
370 371 372
		}
	}
}
373
static u16 __find_equiv_id(unsigned int cpu)
374 375
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
376
	return find_equiv_id(equiv_cpu_table, uci->cpu_sig.sig);
377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392
}

/*
 * Reverse lookup: map an equivalence ID back to the installed-CPU
 * signature recorded for it in the equivalence table, or 0 if absent.
 */
static u32 find_cpu_family_by_equiv_cpu(u16 equiv_cpu)
{
	struct equiv_cpu_entry *e;

	BUG_ON(!equiv_cpu_table);

	for (e = equiv_cpu_table; e->equiv_cpu != 0; e++) {
		if (e->equiv_cpu == equiv_cpu)
			return e->installed_cpu;
	}

	return 0;
}

393 394 395 396 397 398 399
/*
 * A small, trivial cache of per-family ucode patches, keyed by
 * equivalence ID. Returns the cached entry or NULL.
 */
static struct ucode_patch *cache_find_patch(u16 equiv_cpu)
{
	struct ucode_patch *entry;

	list_for_each_entry(entry, &microcode_cache, plist) {
		if (entry->equiv_cpu == equiv_cpu)
			return entry;
	}

	return NULL;
}

static void update_cache(struct ucode_patch *new_patch)
{
	struct ucode_patch *p;

410
	list_for_each_entry(p, &microcode_cache, plist) {
411 412 413 414 415 416 417 418 419 420 421 422
		if (p->equiv_cpu == new_patch->equiv_cpu) {
			if (p->patch_id >= new_patch->patch_id)
				/* we already have the latest patch */
				return;

			list_replace(&p->plist, &new_patch->plist);
			kfree(p->data);
			kfree(p);
			return;
		}
	}
	/* no patch found, add it */
423
	list_add_tail(&new_patch->plist, &microcode_cache);
424 425 426 427
}

static void free_cache(void)
{
428
	struct ucode_patch *p, *tmp;
429

430
	list_for_each_entry_safe(p, tmp, &microcode_cache, plist) {
431 432 433 434 435 436 437 438 439 440
		__list_del(p->plist.prev, p->plist.next);
		kfree(p->data);
		kfree(p);
	}
}

static struct ucode_patch *find_patch(unsigned int cpu)
{
	u16 equiv_id;

441
	equiv_id = __find_equiv_id(cpu);
442 443 444 445 446 447
	if (!equiv_id)
		return NULL;

	return cache_find_patch(equiv_id);
}

448
static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
449
{
450
	struct cpuinfo_x86 *c = &cpu_data(cpu);
451 452
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct ucode_patch *p;
453

454
	csig->sig = cpuid_eax(0x00000001);
455
	csig->rev = c->microcode;
456 457 458 459 460 461 462 463 464

	/*
	 * a patch could have been loaded early, set uci->mc so that
	 * mc_bp_resume() can call apply_microcode()
	 */
	p = find_patch(cpu);
	if (p && (p->patch_id == csig->rev))
		uci->mc = p->data;

465 466
	pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);

467
	return 0;
468 469
}

470
/*
 * Sanity-check a patch section's claimed size against the per-family
 * maximum patch body size and the bytes remaining in the buffer.
 *
 * Returns @patch_size when plausible, 0 on mismatch.
 */
static unsigned int verify_patch_size(u8 family, u32 patch_size,
				      unsigned int size)
{
	u32 max_size;

/* Per-family maximum microcode patch body sizes, in bytes. */
#define F1XH_MPB_MAX_SIZE 2048
#define F14H_MPB_MAX_SIZE 1824
#define F15H_MPB_MAX_SIZE 4096
#define F16H_MPB_MAX_SIZE 3458

	switch (family) {
	case 0x14:
		max_size = F14H_MPB_MAX_SIZE;
		break;
	case 0x15:
		max_size = F15H_MPB_MAX_SIZE;
		break;
	case 0x16:
		max_size = F16H_MPB_MAX_SIZE;
		break;
	default:
		max_size = F1XH_MPB_MAX_SIZE;
		break;
	}

	if (patch_size > min_t(u32, size, max_size)) {
		pr_err("patch size mismatch\n");
		return 0;
	}

	return patch_size;
}

503
/*
 * Late-loading apply: pick the cached patch matching @cpu and program
 * it via the PATCH_LOADER MSR, updating the per-CPU bookkeeping.
 * Must run on @cpu itself.
 *
 * Returns 0 on success or when no/newer microcode is already active,
 * -1 when the hardware rejected the patch.
 */
static int apply_microcode_amd(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct microcode_amd *mc_amd;
	struct ucode_cpu_info *uci;
	struct ucode_patch *p;
	u32 rev, dummy;

	BUG_ON(raw_smp_processor_id() != cpu);

	uci = ucode_cpu_info + cpu;

	p = find_patch(cpu);
	if (!p)
		return 0;

	mc_amd  = p->data;
	uci->mc = p->data;

	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

	/* need to apply patch? */
	if (rev >= mc_amd->hdr.patch_id) {
		/* Already current or newer: just sync the bookkeeping. */
		c->microcode = rev;
		uci->cpu_sig.rev = rev;
		return 0;
	}

	if (__apply_microcode_amd(mc_amd)) {
		pr_err("CPU%d: update failed for patch_level=0x%08x\n",
			cpu, mc_amd->hdr.patch_id);
		return -1;
	}
	pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
		mc_amd->hdr.patch_id);

	uci->cpu_sig.rev = mc_amd->hdr.patch_id;
	c->microcode = mc_amd->hdr.patch_id;

	return 0;
}

545
static int install_equiv_cpu_table(const u8 *buf)
546
{
547 548 549
	unsigned int *ibuf = (unsigned int *)buf;
	unsigned int type = ibuf[1];
	unsigned int size = ibuf[2];
550

551
	if (type != UCODE_EQUIV_CPU_TABLE_TYPE || !size) {
552 553
		pr_err("empty section/"
		       "invalid type field in container file section header\n");
554
		return -EINVAL;
555 556
	}

557
	equiv_cpu_table = vmalloc(size);
558
	if (!equiv_cpu_table) {
559
		pr_err("failed to allocate equivalent CPU table\n");
560
		return -ENOMEM;
561 562
	}

563
	memcpy(equiv_cpu_table, buf + CONTAINER_HDR_SZ, size);
564

565 566
	/* add header length */
	return size + CONTAINER_HDR_SZ;
567 568
}

D
Dmitry Adamushko 已提交
569
/* Release the vmalloc'ed equivalence table (vfree(NULL) is a no-op). */
static void free_equiv_cpu_table(void)
{
	vfree(equiv_cpu_table);
	equiv_cpu_table = NULL;
}
574

575
/* Tear down all driver state: equivalence table and patch cache. */
static void cleanup(void)
{
	free_equiv_cpu_table();
	free_cache();
}

/*
 * We return the current size even if some of the checks failed so that
 * we can skip over the next patch. If we return a negative value, we
 * signal a grave error like a memory allocation has failed and the
 * driver cannot continue functioning normally. In such cases, we tear
 * down everything we've used up so far and exit.
 */
588
static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover)
589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608
{
	struct microcode_header_amd *mc_hdr;
	struct ucode_patch *patch;
	unsigned int patch_size, crnt_size, ret;
	u32 proc_fam;
	u16 proc_id;

	patch_size  = *(u32 *)(fw + 4);
	crnt_size   = patch_size + SECTION_HDR_SIZE;
	mc_hdr	    = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
	proc_id	    = mc_hdr->processor_rev_id;

	proc_fam = find_cpu_family_by_equiv_cpu(proc_id);
	if (!proc_fam) {
		pr_err("No patch family for equiv ID: 0x%04x\n", proc_id);
		return crnt_size;
	}

	/* check if patch is for the current family */
	proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff);
609
	if (proc_fam != family)
610 611 612 613 614 615 616 617
		return crnt_size;

	if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
		pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n",
			mc_hdr->patch_id);
		return crnt_size;
	}

618
	ret = verify_patch_size(family, patch_size, leftover);
619 620 621 622 623 624 625 626 627 628 629
	if (!ret) {
		pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id);
		return crnt_size;
	}

	patch = kzalloc(sizeof(*patch), GFP_KERNEL);
	if (!patch) {
		pr_err("Patch allocation failure.\n");
		return -EINVAL;
	}

630
	patch->data = kmemdup(fw + SECTION_HDR_SIZE, patch_size, GFP_KERNEL);
631 632 633 634 635 636 637 638 639 640
	if (!patch->data) {
		pr_err("Patch data allocation failure.\n");
		kfree(patch);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&patch->plist);
	patch->patch_id  = mc_hdr->patch_id;
	patch->equiv_cpu = proc_id;

641 642 643
	pr_debug("%s: Added patch_id: 0x%08x, proc_id: 0x%04x\n",
		 __func__, patch->patch_id, proc_id);

644 645 646 647 648 649
	/* ... and add to cache. */
	update_cache(patch);

	return crnt_size;
}

650 651
/*
 * Parse a full container: install its equivalence table, then verify
 * and cache every patch section that matches @family.
 *
 * Returns UCODE_OK on success, UCODE_ERROR on a malformed container or
 * a grave (allocation) failure inside verify_and_add_patch().
 */
static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
					     size_t size)
{
	enum ucode_state ret = UCODE_ERROR;
	unsigned int leftover;
	u8 *fw = (u8 *)data;
	int crnt_size = 0;
	int offset;

	offset = install_equiv_cpu_table(data);
	if (offset < 0) {
		pr_err("failed to create equivalent cpu table\n");
		return ret;
	}
	fw += offset;
	leftover = size - offset;

	/* The first section after the table must be a patch section. */
	if (*(u32 *)fw != UCODE_UCODE_TYPE) {
		pr_err("invalid type field in container file section header\n");
		free_equiv_cpu_table();
		return ret;
	}

	while (leftover) {
		crnt_size = verify_and_add_patch(family, fw, leftover);
		if (crnt_size < 0)
			return ret;

		fw	 += crnt_size;
		leftover -= crnt_size;
	}

	return UCODE_OK;
}

685 686
/*
 * Replace the driver's cached state with the container in @data and, on
 * 32-bit, stash the BSP's matching patch into amd_ucode_patch for early
 * (re-)application.
 */
static enum ucode_state
load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
{
	enum ucode_state ret;

	/* free old equiv table */
	free_equiv_cpu_table();

	ret = __load_microcode_amd(family, data, size);

	/* On failure, drop everything we may have partially built up. */
	if (ret != UCODE_OK)
		cleanup();

#ifdef CONFIG_X86_32
	/* save BSP's matching patch for early load */
	if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) {
		struct ucode_patch *p = find_patch(cpu);
		if (p) {
			memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
			/* ksize() bounds the copy to the actual allocation */
			memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
							       PATCH_MAX_SIZE));
		}
	}
#endif
	return ret;
}

712 713 714 715 716 717 718 719
/*
 * AMD microcode firmware naming convention, up to family 15h they are in
 * the legacy file:
 *
 *    amd-ucode/microcode_amd.bin
 *
 * This legacy file is always smaller than 2K in size.
 *
720
 * Beginning with family 15h, they are in family-specific firmware files:
721 722 723 724 725 726 727
 *
 *    amd-ucode/microcode_amd_fam15h.bin
 *    amd-ucode/microcode_amd_fam16h.bin
 *    ...
 *
 * These might be larger than 2K.
 */
728 729
static enum ucode_state request_microcode_amd(int cpu, struct device *device,
					      bool refresh_fw)
D
Dmitry Adamushko 已提交
730
{
731 732
	char fw_name[36] = "amd-ucode/microcode_amd.bin";
	struct cpuinfo_x86 *c = &cpu_data(cpu);
733 734 735 736 737 738
	enum ucode_state ret = UCODE_NFOUND;
	const struct firmware *fw;

	/* reload ucode container only on the boot cpu */
	if (!refresh_fw || c->cpu_index != boot_cpu_data.cpu_index)
		return UCODE_OK;
739 740 741

	if (c->x86 >= 0x15)
		snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);
D
Dmitry Adamushko 已提交
742

743
	if (request_firmware_direct(&fw, (const char *)fw_name, device)) {
744
		pr_debug("failed to load file %s\n", fw_name);
745
		goto out;
746
	}
D
Dmitry Adamushko 已提交
747

748 749
	ret = UCODE_ERROR;
	if (*(u32 *)fw->data != UCODE_MAGIC) {
750
		pr_err("invalid magic value (0x%08x)\n", *(u32 *)fw->data);
751
		goto fw_release;
752 753
	}

754
	ret = load_microcode_amd(cpu, c->x86, fw->data, fw->size);
D
Dmitry Adamushko 已提交
755

756
 fw_release:
757
	release_firmware(fw);
758

759
 out:
D
Dmitry Adamushko 已提交
760 761 762
	return ret;
}

763 764
/* Loading microcode from a user-space buffer is not supported on AMD. */
static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
	return UCODE_ERROR;
}

static void microcode_fini_cpu_amd(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

773
	uci->mc = NULL;
774 775 776
}

/* Ops vector handed to the core microcode driver by init_amd_microcode(). */
static struct microcode_ops microcode_amd_ops = {
	.request_microcode_user           = request_microcode_user,
	.request_microcode_fw             = request_microcode_amd,
	.collect_cpu_info                 = collect_cpu_info_amd,
	.apply_microcode                  = apply_microcode_amd,
	.microcode_fini_cpu               = microcode_fini_cpu_amd,
};

784
/*
 * Entry point for the core microcode driver: reject unsupported CPUs
 * (non-AMD or family < 10h), report any early update once, and hand
 * back the AMD ops vector.
 */
struct microcode_ops * __init init_amd_microcode(void)
{
	struct cpuinfo_x86 *bsp = &boot_cpu_data;

	if (bsp->x86_vendor != X86_VENDOR_AMD || bsp->x86 < 0x10) {
		pr_warn("AMD CPU family 0x%x not supported\n", bsp->x86);
		return NULL;
	}

	if (ucode_new_rev)
		pr_info_once("microcode updated early to new patch_level=0x%08x\n",
			     ucode_new_rev);

	return &microcode_amd_ops;
}
799 800 801

/* Module-exit teardown: free the equivalence table and the patch cache. */
void __exit exit_amd_microcode(void)
{
	cleanup();
}