amd.c 20.1 KB
Newer Older
1 2
/*
 *  AMD CPU Microcode Update Driver for Linux
3 4 5 6
 *
 *  This driver allows to upgrade microcode on F10h AMD
 *  CPUs and later.
 *
7
 *  Copyright (C) 2008-2011 Advanced Micro Devices Inc.
8
 *	          2013-2016 Borislav Petkov <bp@alien8.de>
9 10 11 12 13 14
 *
 *  Author: Peter Oruba <peter.oruba@amd.com>
 *
 *  Based on work by:
 *  Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
 *
15 16 17 18 19
 *  early loader:
 *  Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 *  Author: Jacob Shin <jacob.shin@amd.com>
 *  Fixes: Borislav Petkov <bp@suse.de>
20
 *
21
 *  Licensed under the terms of the GNU General Public
22
 *  License version 2. See file COPYING for details.
I
Ingo Molnar 已提交
23
 */
24
#define pr_fmt(fmt) "microcode: " fmt
25

26
#include <linux/earlycpio.h>
I
Ingo Molnar 已提交
27 28 29
#include <linux/firmware.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
30
#include <linux/initrd.h>
I
Ingo Molnar 已提交
31
#include <linux/kernel.h>
32 33
#include <linux/pci.h>

34
#include <asm/microcode_amd.h>
35
#include <asm/microcode.h>
I
Ingo Molnar 已提交
36
#include <asm/processor.h>
37 38
#include <asm/setup.h>
#include <asm/cpu.h>
I
Ingo Molnar 已提交
39
#include <asm/msr.h>
40

D
Dmitry Adamushko 已提交
41
/* In-kernel copy of the container's CPU equivalence table (vmalloc'd). */
static struct equiv_cpu_entry *equiv_cpu_table;

/*
 * This points to the current valid container of microcode patches which we will
 * save from the initrd/builtin before jettisoning its contents. @mc is the
 * microcode patch we found to match.
 */
static struct cont_desc {
	struct microcode_amd *mc;	/* patch matching this CPU, if found */
	u32		     cpuid_1_eax;	/* CPUID(1).EAX we scan for */
	u32		     psize;	/* size of the matching patch */
	u16		     eq_id;	/* equivalence ID of the match */
	u8		     *data;	/* start of the matching container */
	size_t		     size;	/* size of that container */
} cont;

/* Patch level we updated to during early loading; reported at driver init. */
static u32 ucode_new_rev;

/* Stashed copy of the BSP's matching patch, applied on APs and on resume. */
static u8 amd_ucode_patch[PATCH_MAX_SIZE];

/*
 * Microcode patch container file is prepended to the initrd in cpio
 * format. See Documentation/x86/early-microcode.txt
 */
static const char
ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";
66

67
/*
 * Look up @sig (CPUID(1).EAX) in @equiv_table and return the matching
 * equivalence ID, or 0 when the table is absent or has no entry for it.
 */
static u16 find_equiv_id(struct equiv_cpu_entry *equiv_table, u32 sig)
{
	struct equiv_cpu_entry *e = equiv_table;

	/* The table is terminated by a zero installed_cpu entry. */
	while (e && e->installed_cpu) {
		if (e->installed_cpu == sig)
			return e->equiv_cpu;
		e++;
	}

	return 0;
}

77
/*
 * This scans the ucode blob for the proper container as we can have multiple
 * containers glued together. Returns the equivalence ID from the equivalence
 * table or 0 if none found.
 * Returns the amount of bytes consumed while scanning. @desc contains all the
 * data we're going to use in later stages of the application.
 */
static ssize_t parse_container(u8 *ucode, ssize_t size, struct cont_desc *desc)
{
	struct equiv_cpu_entry *eq;
	ssize_t orig_size = size;
	u32 *hdr = (u32 *)ucode;
	u16 eq_id;
	u8 *buf;

	/* Am I looking at an equivalence table header? */
	if (hdr[0] != UCODE_MAGIC ||
	    hdr[1] != UCODE_EQUIV_CPU_TABLE_TYPE ||
	    hdr[2] == 0) {
		/* Not a container start; skip one header's worth and retry. */
		desc->eq_id = 0;
		return CONTAINER_HDR_SZ;
	}

	buf = ucode;

	/* The equivalence table immediately follows the container header. */
	eq = (struct equiv_cpu_entry *)(buf + CONTAINER_HDR_SZ);

	/* Find the equivalence ID of our CPU in this table: */
	eq_id = find_equiv_id(eq, desc->cpuid_1_eax);

	/* hdr[2] is the equivalence table length in bytes. */
	buf  += hdr[2] + CONTAINER_HDR_SZ;
	size -= hdr[2] + CONTAINER_HDR_SZ;

	/*
	 * Scan through the rest of the container to find where it ends. We do
	 * some basic sanity-checking too.
	 */
	while (size > 0) {
		struct microcode_amd *mc;
		u32 patch_size;

		hdr = (u32 *)buf;

		/* A non-patch section type marks the end of this container. */
		if (hdr[0] != UCODE_UCODE_TYPE)
			break;

		/* Sanity-check patch size. */
		patch_size = hdr[1];
		if (patch_size > PATCH_MAX_SIZE)
			break;

		/* Skip patch section header: */
		buf  += SECTION_HDR_SIZE;
		size -= SECTION_HDR_SIZE;

		/* Remember the (last) patch whose rev ID matches our CPU. */
		mc = (struct microcode_amd *)buf;
		if (eq_id == mc->hdr.processor_rev_id) {
			desc->psize = patch_size;
			desc->mc = mc;
		}

		buf  += patch_size;
		size -= patch_size;
	}

	/*
	 * If we have found a patch (desc->mc), it means we're looking at the
	 * container which has a patch for this CPU so return 0 to mean, @ucode
	 * already points to the proper container. Otherwise, we return the size
	 * we scanned so that we can advance to the next container in the
	 * buffer.
	 */
	if (desc->mc) {
		desc->eq_id = eq_id;
		desc->data  = ucode;
		desc->size  = orig_size - size;

		return 0;
	}

	return orig_size - size;
}

/*
 * Scan the ucode blob for the proper container as we can have multiple
 * containers glued together. parse_container() returns 0 once it has
 * found (and recorded in @desc) the container matching this CPU.
 */
static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
{
	ssize_t left, skipped;

	for (left = size; left >= 0; left -= skipped, ucode += skipped) {
		skipped = parse_container(ucode, left, desc);
		if (!skipped)
			return;
	}
}

178
/*
 * Hand a patch image to the CPU via the PATCH_LOADER MSR and verify that
 * the revision the hardware reports afterwards matches the patch header.
 * Returns 0 on success, -1 when the update did not take.
 */
static int __apply_microcode_amd(struct microcode_amd *mc)
{
	u32 rev, dummy;

	/* Point the patch-loader MSR at the patch payload (data_code). */
	native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc->hdr.data_code);

	/* verify patch application was successful */
	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
	if (rev != mc->hdr.patch_id)
		return -1;

	return 0;
}

/*
 * Early load occurs before we can vmalloc(). So we look for the microcode
 * patch container file in initrd, traverse equivalent cpu table, look for a
 * matching microcode patch, and update, all in initrd memory in place.
 * When vmalloc() is available for use later -- on 64-bit during first AP load,
 * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
 * load_microcode_amd() to save equivalent cpu table and microcode patches in
 * kernel heap memory.
 *
 * Returns true if container found (sets @desc), false otherwise.
 */
static bool
apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size,
			  bool save_patch, struct cont_desc *ret_desc)
{
	struct cont_desc desc = { 0 };
	u8 (*patch)[PATCH_MAX_SIZE];
	struct microcode_amd *mc;
	u32 rev, dummy, *new_rev;
	bool ret = false;

	/*
	 * On 32-bit this runs before paging is enabled, so the globals must
	 * be accessed through their physical addresses.
	 */
#ifdef CONFIG_X86_32
	new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
	patch	= (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
#else
	new_rev = &ucode_new_rev;
	patch	= &amd_ucode_patch;
#endif

	desc.cpuid_1_eax = cpuid_1_eax;

	/* Locate the container matching this CPU within the blob. */
	scan_containers(ucode, size, &desc);
	if (!desc.eq_id)
		return ret;

	mc = desc.mc;
	if (!mc)
		return ret;

	/* Nothing to do if the CPU already runs this revision or newer. */
	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
	if (rev >= mc->hdr.patch_id)
		return ret;

	if (!__apply_microcode_amd(mc)) {
		*new_rev = mc->hdr.patch_id;
		ret      = true;

		/* Stash the patch for later AP/resume application. */
		if (save_patch)
			memcpy(patch, mc, min_t(u32, desc.psize, PATCH_MAX_SIZE));
	}

	/* Hand the found container description back if the caller wants it. */
	if (ret_desc)
		*ret_desc = desc;

	return ret;
}

249
static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
250 251 252 253 254 255 256 257 258 259 260 261 262 263
{
#ifdef CONFIG_X86_64
	char fw_name[36] = "amd-ucode/microcode_amd.bin";

	if (family >= 0x15)
		snprintf(fw_name, sizeof(fw_name),
			 "amd-ucode/microcode_amd_fam%.2xh.bin", family);

	return get_builtin_firmware(cp, fw_name);
#else
	return false;
#endif
}

264
/*
 * Early microcode loading on the boot CPU: locate a container (builtin
 * firmware first, then the initrd) and apply a matching patch in place.
 */
void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
{
	struct ucode_cpu_info *uci;
	struct cpio_data cp;
	const char *path;
	bool use_pa;

	/* 32-bit runs before paging is up: work with physical addresses. */
	if (IS_ENABLED(CONFIG_X86_32)) {
		uci	= (struct ucode_cpu_info *)__pa_nodebug(ucode_cpu_info);
		path	= (const char *)__pa_nodebug(ucode_path);
		use_pa	= true;
	} else {
		uci     = ucode_cpu_info;
		path	= ucode_path;
		use_pa	= false;
	}

	if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
		cp = find_microcode_in_initrd(path, use_pa);

	if (!(cp.data && cp.size))
		return;

	/* Needed in load_microcode_amd() */
	uci->cpu_sig.sig = cpuid_1_eax;

	/* save_patch=true: stash the match for the APs and for resume. */
	apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, true, NULL);
}

#ifdef CONFIG_X86_32
/*
 * On 32-bit, since AP's early load occurs before paging is turned on, we
 * cannot traverse cpu_equiv_table and microcode_cache in kernel heap memory.
 * So during cold boot, AP will apply_ucode_in_initrd() just like the BSP.
 * In save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch,
 * which is used upon resume from suspend.
 */
void load_ucode_amd_ap(unsigned int cpuid_1_eax)
{
	struct microcode_amd *mc;
	struct cpio_data cp;

	/* Fast path: the BSP has already stashed a matching patch. */
	mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
	if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
		__apply_microcode_amd(mc);
		return;
	}

	if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
		cp = find_microcode_in_initrd((const char *)__pa_nodebug(ucode_path), true);

	if (!(cp.data && cp.size))
		return;

	/*
	 * This would set amd_ucode_patch above so that the following APs can
	 * use it directly instead of going down this path again.
	 */
	apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, true, NULL);
}
#else
325
/*
 * 64-bit AP early load: reuse the container cached in @cont by the first
 * AP (or rescan the blob if it is not cached yet) and apply the stashed
 * patch when this AP's equivalence ID matches the BSP's.
 */
void load_ucode_amd_ap(unsigned int cpuid_1_eax)
{
	struct equiv_cpu_entry *eq;
	struct microcode_amd *mc;
	u16 eq_id;

	/* First AP hasn't cached it yet, go through the blob. */
	if (!cont.data) {
		struct cpio_data cp;

		/* cont.size == -1 means a previous scan already failed. */
		if (cont.size == -1)
			return;

reget:
		if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax))) {
			cp = find_microcode_in_initrd(ucode_path, false);

			if (!(cp.data && cp.size)) {
				/*
				 * Mark it so that other APs do not scan again
				 * for no real reason and slow down boot
				 * needlessly.
				 */
				cont.size = -1;
				return;
			}
		}

		if (!apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false, &cont)) {
			cont.data = NULL;
			cont.size = -1;
			return;
		}
	}

	eq  = (struct equiv_cpu_entry *)(cont.data + CONTAINER_HDR_SZ);

	eq_id = find_equiv_id(eq, cpuid_1_eax);
	if (!eq_id)
		return;

	if (eq_id == cont.eq_id) {
		u32 rev, dummy;

		native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

		/*
		 * NOTE(review): mc points at a static array, so the NULL
		 * check below can never fail.
		 */
		mc = (struct microcode_amd *)amd_ucode_patch;

		if (mc && rev < mc->hdr.patch_id) {
			if (!__apply_microcode_amd(mc))
				ucode_new_rev = mc->hdr.patch_id;
		}

	} else {

		/*
		 * AP has a different equivalence ID than BSP, looks like
		 * mixed-steppings silicon so go through the ucode blob anew.
		 */
		goto reget;
	}
}
387
#endif /* CONFIG_X86_32 */
388

389 390 391
/* Forward declaration: defined below, after the cache helpers. */
static enum ucode_state
load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);

/*
 * Runs once heap/vmalloc are usable: parse the container recorded in
 * @cont (rescanning the initrd on 32-bit, where early load could not
 * cache it) and move its patches into the kernel-heap cache before the
 * initrd is jettisoned. Returns 0 on success, -EINVAL otherwise.
 */
int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
{
	enum ucode_state ret;
	int retval = 0;

	if (!cont.data) {
		/* cont.size == -1 flags an earlier failed scan; bail. */
		if (IS_ENABLED(CONFIG_X86_32) && (cont.size != -1)) {
			struct cpio_data cp;

			cp = find_microcode_in_initrd(ucode_path, false);
			if (!(cp.data && cp.size)) {
				cont.size = -1;
				return -EINVAL;
			}

			cont.cpuid_1_eax = cpuid_1_eax;

			scan_containers(cp.data, cp.size, &cont);
			if (!cont.eq_id) {
				cont.size = -1;
				return -EINVAL;
			}

		} else
			return -EINVAL;
	}

	ret = load_microcode_amd(smp_processor_id(), x86_family(cpuid_1_eax), cont.data, cont.size);
	if (ret != UCODE_OK)
		retval = -EINVAL;

	/*
	 * This will be freed any msec now, stash patches for the current
	 * family and switch to patch cache for cpu hotplug, etc later.
	 */
	cont.data = NULL;
	cont.size = 0;

	return retval;
}

void reload_ucode_amd(void)
{
	struct microcode_amd *mc;
436
	u32 rev, dummy;
437 438

	mc = (struct microcode_amd *)amd_ucode_patch;
439 440
	if (!mc)
		return;
441

442 443
	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

444
	if (rev < mc->hdr.patch_id) {
445 446
		if (!__apply_microcode_amd(mc)) {
			ucode_new_rev = mc->hdr.patch_id;
447
			pr_info("reload patch_level=0x%08x\n", ucode_new_rev);
448 449 450
		}
	}
}
451
static u16 __find_equiv_id(unsigned int cpu)
452 453
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
454
	return find_equiv_id(equiv_cpu_table, uci->cpu_sig.sig);
455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470
}

/*
 * Reverse lookup: map an equivalence ID back to the installed-CPU
 * signature it belongs to, or 0 when the ID is not in the table.
 */
static u32 find_cpu_family_by_equiv_cpu(u16 equiv_cpu)
{
	struct equiv_cpu_entry *e;

	BUG_ON(!equiv_cpu_table);

	/* Table ends with a zero equiv_cpu entry. */
	for (e = equiv_cpu_table; e->equiv_cpu != 0; e++) {
		if (e->equiv_cpu == equiv_cpu)
			return e->installed_cpu;
	}

	return 0;
}

471 472 473 474 475 476 477
/*
 * a small, trivial cache of per-family ucode patches
 *
 * Return the cached patch for @equiv_cpu, or NULL when none is cached.
 */
static struct ucode_patch *cache_find_patch(u16 equiv_cpu)
{
	struct ucode_patch *p;

	list_for_each_entry(p, &microcode_cache, plist) {
		if (p->equiv_cpu == equiv_cpu)
			return p;
	}

	return NULL;
}

static void update_cache(struct ucode_patch *new_patch)
{
	struct ucode_patch *p;

488
	list_for_each_entry(p, &microcode_cache, plist) {
489 490 491 492 493 494 495 496 497 498 499 500
		if (p->equiv_cpu == new_patch->equiv_cpu) {
			if (p->patch_id >= new_patch->patch_id)
				/* we already have the latest patch */
				return;

			list_replace(&p->plist, &new_patch->plist);
			kfree(p->data);
			kfree(p);
			return;
		}
	}
	/* no patch found, add it */
501
	list_add_tail(&new_patch->plist, &microcode_cache);
502 503 504 505
}

static void free_cache(void)
{
506
	struct ucode_patch *p, *tmp;
507

508
	list_for_each_entry_safe(p, tmp, &microcode_cache, plist) {
509 510 511 512 513 514 515 516 517 518
		__list_del(p->plist.prev, p->plist.next);
		kfree(p->data);
		kfree(p);
	}
}

static struct ucode_patch *find_patch(unsigned int cpu)
{
	u16 equiv_id;

519
	equiv_id = __find_equiv_id(cpu);
520 521 522 523 524 525
	if (!equiv_id)
		return NULL;

	return cache_find_patch(equiv_id);
}

526
/*
 * Fill @csig with this CPU's signature (CPUID(1).EAX) and the microcode
 * revision it is currently running. Always returns 0.
 */
static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct ucode_patch *p;

	csig->sig = cpuid_eax(0x00000001);
	csig->rev = c->microcode;

	/*
	 * a patch could have been loaded early, set uci->mc so that
	 * mc_bp_resume() can call apply_microcode()
	 */
	p = find_patch(cpu);
	if (p && (p->patch_id == csig->rev))
		uci->mc = p->data;

	pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);

	return 0;
}

548
/*
 * Check that @patch_size does not exceed either the remaining buffer
 * @size or the per-family maximum patch size. Returns @patch_size when
 * acceptable, 0 on mismatch.
 */
static unsigned int verify_patch_size(u8 family, u32 patch_size,
				      unsigned int size)
{
	u32 max_size;

#define F1XH_MPB_MAX_SIZE 2048
#define F14H_MPB_MAX_SIZE 1824
#define F15H_MPB_MAX_SIZE 4096
#define F16H_MPB_MAX_SIZE 3458

	if (family == 0x14)
		max_size = F14H_MPB_MAX_SIZE;
	else if (family == 0x15)
		max_size = F15H_MPB_MAX_SIZE;
	else if (family == 0x16)
		max_size = F16H_MPB_MAX_SIZE;
	else
		max_size = F1XH_MPB_MAX_SIZE;

	if (patch_size > min_t(u32, size, max_size)) {
		pr_err("patch size mismatch\n");
		return 0;
	}

	return patch_size;
}

581
/*
 * Apply the best cached patch to @cpu and update cpuinfo plus the per-CPU
 * microcode bookkeeping. Returns 0 when nothing was needed or on success,
 * -1 when the hardware rejected the patch.
 */
static int apply_microcode_amd(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct microcode_amd *mc_amd;
	struct ucode_cpu_info *uci;
	struct ucode_patch *p;
	u32 rev, dummy;

	/* The MSR writes below only affect the CPU we are running on. */
	BUG_ON(raw_smp_processor_id() != cpu);

	uci = ucode_cpu_info + cpu;

	p = find_patch(cpu);
	if (!p)
		return 0;

	mc_amd  = p->data;
	uci->mc = p->data;

	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

	/* need to apply patch? */
	if (rev >= mc_amd->hdr.patch_id) {
		/* Already current; just sync the bookkeeping. */
		c->microcode = rev;
		uci->cpu_sig.rev = rev;
		return 0;
	}

	if (__apply_microcode_amd(mc_amd)) {
		pr_err("CPU%d: update failed for patch_level=0x%08x\n",
			cpu, mc_amd->hdr.patch_id);
		return -1;
	}
	pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
		mc_amd->hdr.patch_id);

	uci->cpu_sig.rev = mc_amd->hdr.patch_id;
	c->microcode = mc_amd->hdr.patch_id;

	return 0;
}

623
/*
 * Validate the container's equivalence-table section header and copy the
 * table into the vmalloc'd equiv_cpu_table. Returns the number of bytes
 * the table occupies in the container (header included), or a negative
 * errno on failure.
 */
static int install_equiv_cpu_table(const u8 *buf)
{
	unsigned int *ibuf = (unsigned int *)buf;
	unsigned int type = ibuf[1];	/* section type */
	unsigned int size = ibuf[2];	/* table size in bytes */

	if (type != UCODE_EQUIV_CPU_TABLE_TYPE || !size) {
		pr_err("empty section/"
		       "invalid type field in container file section header\n");
		return -EINVAL;
	}

	equiv_cpu_table = vmalloc(size);
	if (!equiv_cpu_table) {
		pr_err("failed to allocate equivalent CPU table\n");
		return -ENOMEM;
	}

	memcpy(equiv_cpu_table, buf + CONTAINER_HDR_SZ, size);

	/* add header length */
	return size + CONTAINER_HDR_SZ;
}

D
Dmitry Adamushko 已提交
647
/* Free the vmalloc'd equivalence table and clear the stale pointer. */
static void free_equiv_cpu_table(void)
{
	vfree(equiv_cpu_table);
	equiv_cpu_table = NULL;
}

/* Tear down all driver-owned state: equivalence table and patch cache. */
static void cleanup(void)
{
	free_equiv_cpu_table();
	free_cache();
}

/*
 * We return the current size even if some of the checks failed so that
 * we can skip over the next patch. If we return a negative value, we
 * signal a grave error like a memory allocation has failed and the
 * driver cannot continue functioning normally. In such cases, we tear
 * down everything we've used up so far and exit.
 */
static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover)
{
	struct microcode_header_amd *mc_hdr;
	struct ucode_patch *patch;
	unsigned int patch_size, crnt_size, ret;
	u32 proc_fam;
	u16 proc_id;

	/* Section header: u32 type at +0, u32 patch size at +4. */
	patch_size  = *(u32 *)(fw + 4);
	crnt_size   = patch_size + SECTION_HDR_SIZE;
	mc_hdr	    = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
	proc_id	    = mc_hdr->processor_rev_id;

	proc_fam = find_cpu_family_by_equiv_cpu(proc_id);
	if (!proc_fam) {
		pr_err("No patch family for equiv ID: 0x%04x\n", proc_id);
		return crnt_size;
	}

	/* check if patch is for the current family */
	/* Decode family from CPUID sig: base nibble + extended family byte. */
	proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff);
	if (proc_fam != family)
		return crnt_size;

	if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
		pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n",
			mc_hdr->patch_id);
		return crnt_size;
	}

	ret = verify_patch_size(family, patch_size, leftover);
	if (!ret) {
		pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id);
		return crnt_size;
	}

	patch = kzalloc(sizeof(*patch), GFP_KERNEL);
	if (!patch) {
		pr_err("Patch allocation failure.\n");
		return -EINVAL;
	}

	/* Copy the patch payload out of the firmware buffer. */
	patch->data = kmemdup(fw + SECTION_HDR_SIZE, patch_size, GFP_KERNEL);
	if (!patch->data) {
		pr_err("Patch data allocation failure.\n");
		kfree(patch);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&patch->plist);
	patch->patch_id  = mc_hdr->patch_id;
	patch->equiv_cpu = proc_id;

	pr_debug("%s: Added patch_id: 0x%08x, proc_id: 0x%04x\n",
		 __func__, patch->patch_id, proc_id);

	/* ... and add to cache. */
	update_cache(patch);

	return crnt_size;
}

728 729
/*
 * Parse one whole container: install its equivalence table, then walk the
 * patch sections and add each valid patch to the cache. Returns UCODE_OK
 * on success, UCODE_ERROR on a malformed container or allocation failure.
 */
static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
					     size_t size)
{
	enum ucode_state ret = UCODE_ERROR;
	unsigned int leftover;
	u8 *fw = (u8 *)data;
	int crnt_size = 0;
	int offset;

	offset = install_equiv_cpu_table(data);
	if (offset < 0) {
		pr_err("failed to create equivalent cpu table\n");
		return ret;
	}
	fw += offset;
	leftover = size - offset;

	if (*(u32 *)fw != UCODE_UCODE_TYPE) {
		pr_err("invalid type field in container file section header\n");
		free_equiv_cpu_table();
		return ret;
	}

	/*
	 * NOTE(review): leftover is unsigned; if a malformed patch makes
	 * crnt_size exceed leftover, the subtraction below wraps around —
	 * presumably verify_and_add_patch() bounds it, but verify.
	 */
	while (leftover) {
		crnt_size = verify_and_add_patch(family, fw, leftover);
		if (crnt_size < 0)
			return ret;

		fw	 += crnt_size;
		leftover -= crnt_size;
	}

	return UCODE_OK;
}

763 764
/*
 * Load a container for @family: rebuild the equivalence table and the
 * patch cache from @data. On 32-bit, additionally mirror the boot CPU's
 * matching patch into amd_ucode_patch for early/resume application.
 */
static enum ucode_state
load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
{
	enum ucode_state ret;

	/* free old equiv table */
	free_equiv_cpu_table();

	ret = __load_microcode_amd(family, data, size);

	/* On failure, drop whatever partial state was built up. */
	if (ret != UCODE_OK)
		cleanup();

#ifdef CONFIG_X86_32
	/* save BSP's matching patch for early load */
	if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) {
		struct ucode_patch *p = find_patch(cpu);
		if (p) {
			memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
			memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
							       PATCH_MAX_SIZE));
		}
	}
#endif
	return ret;
}

790 791 792 793 794 795 796 797
/*
 * AMD microcode firmware naming convention, up to family 15h they are in
 * the legacy file:
 *
 *    amd-ucode/microcode_amd.bin
 *
 * This legacy file is always smaller than 2K in size.
 *
 * Beginning with family 15h, they are in family-specific firmware files:
 *
 *    amd-ucode/microcode_amd_fam15h.bin
 *    amd-ucode/microcode_amd_fam16h.bin
 *    ...
 *
 * These might be larger than 2K.
 */
static enum ucode_state request_microcode_amd(int cpu, struct device *device,
					      bool refresh_fw)
{
	char fw_name[36] = "amd-ucode/microcode_amd.bin";
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	enum ucode_state ret = UCODE_NFOUND;
	const struct firmware *fw;

	/* reload ucode container only on the boot cpu */
	if (!refresh_fw || c->cpu_index != boot_cpu_data.cpu_index)
		return UCODE_OK;

	/* Families 15h+ use per-family firmware file names. */
	if (c->x86 >= 0x15)
		snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);

	if (request_firmware_direct(&fw, (const char *)fw_name, device)) {
		pr_debug("failed to load file %s\n", fw_name);
		goto out;
	}

	/* Any problem from here on is a real error, not "not found". */
	ret = UCODE_ERROR;
	if (*(u32 *)fw->data != UCODE_MAGIC) {
		pr_err("invalid magic value (0x%08x)\n", *(u32 *)fw->data);
		goto fw_release;
	}

	ret = load_microcode_amd(cpu, c->x86, fw->data, fw->size);

 fw_release:
	release_firmware(fw);

 out:
	return ret;
}

841 842
/* Loading raw microcode images from userspace is not supported on AMD. */
static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
	return UCODE_ERROR;
}

/* Drop the per-CPU pointer into the patch cache; the cache itself stays. */
static void microcode_fini_cpu_amd(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	uci->mc = NULL;
}

/* Vendor callbacks handed to the generic microcode core. */
static struct microcode_ops microcode_amd_ops = {
	.request_microcode_user           = request_microcode_user,
	.request_microcode_fw             = request_microcode_amd,
	.collect_cpu_info                 = collect_cpu_info_amd,
	.apply_microcode                  = apply_microcode_amd,
	.microcode_fini_cpu               = microcode_fini_cpu_amd,
};

862
/*
 * Entry point for the generic microcode driver: require an AMD CPU of
 * family 10h or newer, report any early update that happened, and hand
 * back the AMD callback table (NULL on unsupported CPUs).
 */
struct microcode_ops * __init init_amd_microcode(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
		pr_warn("AMD CPU family 0x%x not supported\n", c->x86);
		return NULL;
	}

	/* Tell the user about an update done during early boot, if any. */
	if (ucode_new_rev)
		pr_info_once("microcode updated early to new patch_level=0x%08x\n",
			     ucode_new_rev);

	return &microcode_amd_ops;
}
877 878 879

/* Driver teardown: free the equivalence table and the patch cache. */
void __exit exit_amd_microcode(void)
{
	cleanup();
}