/*
 *  AMD CPU Microcode Update Driver for Linux
 *
 *  This driver allows upgrading microcode on F10h AMD
 *  CPUs and later.
 *
 *  Copyright (C) 2008-2011 Advanced Micro Devices Inc.
 *	          2013-2016 Borislav Petkov <bp@alien8.de>
 *
 *  Author: Peter Oruba <peter.oruba@amd.com>
 *
 *  Based on work by:
 *  Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
 *
 *  early loader:
 *  Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 *  Author: Jacob Shin <jacob.shin@amd.com>
 *  Fixes: Borislav Petkov <bp@suse.de>
 *
 *  Licensed under the terms of the GNU General Public
 *  License version 2. See file COPYING for details.
 */
#define pr_fmt(fmt) "microcode: " fmt

#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/pci.h>

#include <asm/microcode_amd.h>
#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/cpu.h>
#include <asm/msr.h>

static struct equiv_cpu_entry *equiv_cpu_table;

/*
 * This points to the current valid container of microcode patches which we will
 * save from the initrd/builtin before jettisoning its contents. @mc is the
 * microcode patch we found to match.
 */
static struct cont_desc {
	struct microcode_amd *mc;
	u32		     cpuid_1_eax;
	u32		     psize;
	u16		     eq_id;
	u8		     *data;
	size_t		     size;
} cont;

static u32 ucode_new_rev;
static u8 amd_ucode_patch[PATCH_MAX_SIZE];
static u16 this_equiv_id;

/*
 * The microcode patch container file is prepended to the initrd in cpio
 * format. See Documentation/x86/early-microcode.txt.
 */
static const char
ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";
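
/*
 * Roughly, following Documentation/x86/early-microcode.txt, such an initrd
 * can be built like this (file names are illustrative):
 *
 *   mkdir -p initrd/kernel/x86/microcode
 *   cp microcode.bin initrd/kernel/x86/microcode/AuthenticAMD.bin
 *   (cd initrd; find . | cpio -o -H newc) > ucode.cpio
 *   cat ucode.cpio /boot/initrd.img > /boot/initrd.ucode.img
 */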

static u16 find_equiv_id(struct equiv_cpu_entry *equiv_table, u32 sig)
{
	for (; equiv_table && equiv_table->installed_cpu; equiv_table++) {
		if (sig == equiv_table->installed_cpu)
			return equiv_table->equiv_cpu;
	}

	return 0;
}
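
/*
 * A rough sketch of the on-disk container layout, as inferred from the
 * parsing code below (see CONTAINER_HDR_SZ and SECTION_HDR_SIZE):
 *
 *   u32 UCODE_MAGIC                      \
 *   u32 UCODE_EQUIV_CPU_TABLE_TYPE        } container header
 *   u32 size of the equivalence table    /
 *   struct equiv_cpu_entry table[]         (as many bytes as stated above)
 *   one or more patch sections:
 *     u32 UCODE_UCODE_TYPE               \  section header
 *     u32 patch size                     /
 *     struct microcode_amd patch           (patch size bytes)
 *
 * Several such containers may be glued together in a single blob.
 */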

/*
 * This scans the ucode blob for the proper container as we can have multiple
 * containers glued together.
 *
 * Returns the number of bytes consumed while scanning, or 0 when a matching
 * patch is found, in which case @ucode already points to the proper container.
 * @desc contains all the data we're going to use in later stages of the
 * application, including the equivalence ID found for this CPU.
 */
static ssize_t parse_container(u8 *ucode, ssize_t size, struct cont_desc *desc)
{
	struct equiv_cpu_entry *eq;
	ssize_t orig_size = size;
	u32 *hdr = (u32 *)ucode;
	u16 eq_id;
	u8 *buf;

	/* Am I looking at an equivalence table header? */
	if (hdr[0] != UCODE_MAGIC ||
	    hdr[1] != UCODE_EQUIV_CPU_TABLE_TYPE ||
	    hdr[2] == 0) {
		desc->eq_id = 0;
		return CONTAINER_HDR_SZ;
	}

	buf = ucode;

	eq = (struct equiv_cpu_entry *)(buf + CONTAINER_HDR_SZ);

	/* Find the equivalence ID of our CPU in this table: */
	eq_id = find_equiv_id(eq, desc->cpuid_1_eax);

	buf  += hdr[2] + CONTAINER_HDR_SZ;
	size -= hdr[2] + CONTAINER_HDR_SZ;

	/*
	 * Scan through the rest of the container to find where it ends. We do
	 * some basic sanity-checking too.
	 */
	while (size > 0) {
		struct microcode_amd *mc;
		u32 patch_size;

		hdr = (u32 *)buf;

		if (hdr[0] != UCODE_UCODE_TYPE)
			break;

		/* Sanity-check patch size. */
		patch_size = hdr[1];
		if (patch_size > PATCH_MAX_SIZE)
			break;

		/* Skip patch section header: */
		buf  += SECTION_HDR_SIZE;
		size -= SECTION_HDR_SIZE;

		mc = (struct microcode_amd *)buf;
		if (eq_id == mc->hdr.processor_rev_id) {
			desc->psize = patch_size;
			desc->mc = mc;
		}

		buf  += patch_size;
		size -= patch_size;
	}

	/*
	 * If we have found a patch (desc->mc), it means we're looking at the
	 * container which has a patch for this CPU so return 0 to mean, @ucode
	 * already points to the proper container. Otherwise, we return the size
	 * we scanned so that we can advance to the next container in the
	 * buffer.
	 */
	if (desc->mc) {
		desc->eq_id = eq_id;
		desc->data  = ucode;
		desc->size  = orig_size - size;

		return 0;
	}

	return orig_size - size;
}

/*
 * Scan the ucode blob for the proper container as we can have multiple
 * containers glued together.
 */
static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
{
	ssize_t rem = size;

	while (rem >= 0) {
		ssize_t s = parse_container(ucode, rem, desc);
		if (!s)
			return;

		ucode += s;
		rem   -= s;
	}
}

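/*
 * Apply the patch @mc: point the Patch Loader MSR at the patch data, then
 * read the patch level MSR back to verify that the CPU accepted it.
 */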
static int __apply_microcode_amd(struct microcode_amd *mc)
{
	u32 rev, dummy;

	native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc->hdr.data_code);

	/* verify patch application was successful */
	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
	if (rev != mc->hdr.patch_id)
		return -1;

	return 0;
}

/*
 * Early load occurs before we can vmalloc(). So we look for the microcode
 * patch container file in initrd, traverse equivalent cpu table, look for a
 * matching microcode patch, and update, all in initrd memory in place.
 * When vmalloc() is available for use later -- on 64-bit during first AP load,
 * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
 * load_microcode_amd() to save equivalent cpu table and microcode patches in
 * kernel heap memory.
 *
 * Returns true if container found (sets @desc), false otherwise.
 */
static bool
apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size,
			  bool save_patch, struct cont_desc *ret_desc)
{
	struct cont_desc desc = { 0 };
	u8 (*patch)[PATCH_MAX_SIZE];
	struct microcode_amd *mc;
	u32 rev, *new_rev;
	bool ret = false;

#ifdef CONFIG_X86_32
	new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
	patch	= (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
#else
	new_rev = &ucode_new_rev;
	patch	= &amd_ucode_patch;
#endif

	if (check_current_patch_level(&rev, true))
		return false;

	desc.cpuid_1_eax = cpuid_1_eax;

	scan_containers(ucode, size, &desc);
	if (!desc.eq_id)
		return ret;

	this_equiv_id = desc.eq_id;

	mc = desc.mc;
	if (!mc)
		return ret;

	if (rev >= mc->hdr.patch_id)
		return ret;

	if (!__apply_microcode_amd(mc)) {
		*new_rev = mc->hdr.patch_id;
		ret      = true;

		if (save_patch)
			memcpy(patch, mc, min_t(u32, desc.psize, PATCH_MAX_SIZE));
	}

	if (ret_desc)
		*ret_desc = desc;

	return ret;
}

static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
{
#ifdef CONFIG_X86_64
	char fw_name[36] = "amd-ucode/microcode_amd.bin";

	if (family >= 0x15)
		snprintf(fw_name, sizeof(fw_name),
			 "amd-ucode/microcode_amd_fam%.2xh.bin", family);

	return get_builtin_firmware(cp, fw_name);
#else
	return false;
#endif
}

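/*
 * Load microcode on the BSP. On 32-bit, this runs before paging has been
 * enabled, so globals must be accessed through their physical addresses,
 * hence the __pa_nodebug() conversions below.
 */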
void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
{
	struct ucode_cpu_info *uci;
	struct cpio_data cp;
	const char *path;
	bool use_pa;

	if (IS_ENABLED(CONFIG_X86_32)) {
		uci	= (struct ucode_cpu_info *)__pa_nodebug(ucode_cpu_info);
		path	= (const char *)__pa_nodebug(ucode_path);
		use_pa	= true;
	} else {
		uci     = ucode_cpu_info;
		path	= ucode_path;
		use_pa	= false;
	}

	if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
		cp = find_microcode_in_initrd(path, use_pa);

	if (!(cp.data && cp.size))
		return;

	/* Needed in load_microcode_amd() */
	uci->cpu_sig.sig = cpuid_1_eax;

	apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, true, NULL);
}

#ifdef CONFIG_X86_32
/*
 * On 32-bit, since the AP's early load occurs before paging is turned on, we
 * cannot traverse cpu_equiv_table and microcode_cache in kernel heap memory.
 * So during cold boot, each AP applies microcode from the initrd just like
 * the BSP does. In save_microcode_in_initrd_amd(), the BSP's patch is copied
 * to amd_ucode_patch, which is used upon resume from suspend.
 */
void load_ucode_amd_ap(unsigned int cpuid_1_eax)
{
	struct microcode_amd *mc;
	struct cpio_data cp;

	mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
	if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
		__apply_microcode_amd(mc);
		return;
	}

	if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
		cp = find_microcode_in_initrd((const char *)__pa_nodebug(ucode_path), true);

	if (!(cp.data && cp.size))
		return;

	/*
	 * This would set amd_ucode_patch above so that the following APs can
	 * use it directly instead of going down this path again.
	 */
	apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, true, NULL);
}
#else
void load_ucode_amd_ap(unsigned int cpuid_1_eax)
{
	struct equiv_cpu_entry *eq;
	struct microcode_amd *mc;
	u32 rev;
	u16 eq_id;

	/* 64-bit runs with paging enabled, thus early==false. */
	if (check_current_patch_level(&rev, false))
		return;

	/* First AP hasn't cached it yet, go through the blob. */
	if (!cont.data) {
		struct cpio_data cp = { NULL, 0, "" };

		if (cont.size == -1)
			return;

reget:
		if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax))) {
#ifdef CONFIG_BLK_DEV_INITRD
			cp = find_cpio_data(ucode_path, (void *)initrd_start,
					    initrd_end - initrd_start, NULL);
#endif
			if (!(cp.data && cp.size)) {
				/*
				 * Mark it so that other APs do not scan again
				 * for no real reason and slow down boot
				 * needlessly.
				 */
				cont.size = -1;
				return;
			}
		}

		if (!apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false, &cont)) {
			cont.data = NULL;
			cont.size = -1;
			return;
		}
	}

	eq  = (struct equiv_cpu_entry *)(cont.data + CONTAINER_HDR_SZ);

	eq_id = find_equiv_id(eq, cpuid_1_eax);
	if (!eq_id)
		return;

	if (eq_id == this_equiv_id) {
		mc = (struct microcode_amd *)amd_ucode_patch;

		if (mc && rev < mc->hdr.patch_id) {
			if (!__apply_microcode_amd(mc))
				ucode_new_rev = mc->hdr.patch_id;
		}

	} else {

		/*
		 * AP has a different equivalence ID than BSP, looks like
		 * mixed-steppings silicon so go through the ucode blob anew.
		 */
		goto reget;
	}
}
#endif /* CONFIG_X86_32 */

static enum ucode_state
load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);

int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
{
	enum ucode_state ret;
	int retval = 0;

	if (!cont.data) {
		if (IS_ENABLED(CONFIG_X86_32) && (cont.size != -1)) {
			struct cpio_data cp = { NULL, 0, "" };

#ifdef CONFIG_BLK_DEV_INITRD
			cp = find_cpio_data(ucode_path, (void *)initrd_start,
					    initrd_end - initrd_start, NULL);
#endif

			if (!(cp.data && cp.size)) {
				cont.size = -1;
				return -EINVAL;
			}

			cont.cpuid_1_eax = cpuid_1_eax;

			scan_containers(cp.data, cp.size, &cont);
			if (!cont.eq_id) {
				cont.size = -1;
				return -EINVAL;
			}

		} else
			return -EINVAL;
	}

	ret = load_microcode_amd(smp_processor_id(), x86_family(cpuid_1_eax), cont.data, cont.size);
	if (ret != UCODE_OK)
		retval = -EINVAL;

	/*
	 * The initrd is about to be freed, so stash the patches for the
	 * current family and switch to the patch cache for CPU hotplug,
	 * resume etc. later.
	 */
	cont.data = NULL;
	cont.size = 0;

	return retval;
}

void reload_ucode_amd(void)
{
	struct microcode_amd *mc;
	u32 rev;

	/*
	 * early==false because this is a syscore ->resume path and by
	 * that time paging is long enabled.
	 */
	if (check_current_patch_level(&rev, false))
		return;

	mc = (struct microcode_amd *)amd_ucode_patch;
	if (!mc)
		return;

	if (rev < mc->hdr.patch_id) {
		if (!__apply_microcode_amd(mc)) {
			ucode_new_rev = mc->hdr.patch_id;
			pr_info("reload patch_level=0x%08x\n", ucode_new_rev);
		}
	}
}
static u16 __find_equiv_id(unsigned int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	return find_equiv_id(equiv_cpu_table, uci->cpu_sig.sig);
}

static u32 find_cpu_family_by_equiv_cpu(u16 equiv_cpu)
{
	int i = 0;

	BUG_ON(!equiv_cpu_table);

	while (equiv_cpu_table[i].equiv_cpu != 0) {
		if (equiv_cpu == equiv_cpu_table[i].equiv_cpu)
			return equiv_cpu_table[i].installed_cpu;
		i++;
	}
	return 0;
}

/*
 * a small, trivial cache of per-family ucode patches
 */
static struct ucode_patch *cache_find_patch(u16 equiv_cpu)
{
	struct ucode_patch *p;

	list_for_each_entry(p, &microcode_cache, plist)
		if (p->equiv_cpu == equiv_cpu)
			return p;
	return NULL;
}

static void update_cache(struct ucode_patch *new_patch)
{
	struct ucode_patch *p;

	list_for_each_entry(p, &microcode_cache, plist) {
		if (p->equiv_cpu == new_patch->equiv_cpu) {
			if (p->patch_id >= new_patch->patch_id)
				/* we already have the latest patch */
				return;

			list_replace(&p->plist, &new_patch->plist);
			kfree(p->data);
			kfree(p);
			return;
		}
	}
	/* no patch found, add it */
	list_add_tail(&new_patch->plist, &microcode_cache);
}

static void free_cache(void)
{
	struct ucode_patch *p, *tmp;

	list_for_each_entry_safe(p, tmp, &microcode_cache, plist) {
		__list_del(p->plist.prev, p->plist.next);
		kfree(p->data);
		kfree(p);
	}
}

static struct ucode_patch *find_patch(unsigned int cpu)
{
	u16 equiv_id;

	equiv_id = __find_equiv_id(cpu);
	if (!equiv_id)
		return NULL;

	return cache_find_patch(equiv_id);
}

static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct ucode_patch *p;

	csig->sig = cpuid_eax(0x00000001);
	csig->rev = c->microcode;

	/*
	 * a patch could have been loaded early, set uci->mc so that
	 * mc_bp_resume() can call apply_microcode()
	 */
	p = find_patch(cpu);
	if (p && (p->patch_id == csig->rev))
		uci->mc = p->data;

	pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);

	return 0;
}

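/*
 * Sanity-check @patch_size against the per-family maximum patch size and
 * against @size, the number of bytes left in the current section. Returns
 * 0 on mismatch, @patch_size otherwise.
 */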
static unsigned int verify_patch_size(u8 family, u32 patch_size,
				      unsigned int size)
{
	u32 max_size;

#define F1XH_MPB_MAX_SIZE 2048
#define F14H_MPB_MAX_SIZE 1824
#define F15H_MPB_MAX_SIZE 4096
#define F16H_MPB_MAX_SIZE 3458

	switch (family) {
	case 0x14:
		max_size = F14H_MPB_MAX_SIZE;
		break;
	case 0x15:
		max_size = F15H_MPB_MAX_SIZE;
		break;
	case 0x16:
		max_size = F16H_MPB_MAX_SIZE;
		break;
	default:
		max_size = F1XH_MPB_MAX_SIZE;
		break;
	}

	if (patch_size > min_t(u32, size, max_size)) {
		pr_err("patch size mismatch\n");
		return 0;
	}

	return patch_size;
}

/*
 * Those patch levels cannot be updated to newer ones and thus should be final.
 */
static u32 final_levels[] = {
	0x01000098,
	0x0100009f,
	0x010000af,
	0, /* T-101 terminator */
};

/*
 * Check the current patch level on this CPU.
 *
 * @rev:   Use it to return the patch level. It is set to 0 if the current
 *         level is one of the final, un-updatable ones above.
 * @early: set when running before paging has been enabled; on 32-bit,
 *         globals then have to be accessed through their physical addresses.
 *
 * Returns:
 *  - true: if update should stop
 *  - false: otherwise
 */
bool check_current_patch_level(u32 *rev, bool early)
{
	u32 lvl, dummy, i;
	bool ret = false;
	u32 *levels;

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);

	if (IS_ENABLED(CONFIG_X86_32) && early)
		levels = (u32 *)__pa_nodebug(&final_levels);
	else
		levels = final_levels;

	for (i = 0; levels[i]; i++) {
		if (lvl == levels[i]) {
			lvl = 0;
			ret = true;
			break;
		}
	}

	if (rev)
		*rev = lvl;

	return ret;
}

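/*
 * The late-loading ->apply_microcode() callback: look up the cached patch
 * for @cpu and apply it if it is newer than the currently running level.
 */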
static int apply_microcode_amd(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct microcode_amd *mc_amd;
	struct ucode_cpu_info *uci;
	struct ucode_patch *p;
	u32 rev;

	BUG_ON(raw_smp_processor_id() != cpu);

	uci = ucode_cpu_info + cpu;

	p = find_patch(cpu);
	if (!p)
		return 0;

	mc_amd  = p->data;
	uci->mc = p->data;

	if (check_current_patch_level(&rev, false))
		return -1;

	/* need to apply patch? */
	if (rev >= mc_amd->hdr.patch_id) {
		c->microcode = rev;
		uci->cpu_sig.rev = rev;
		return 0;
	}

	if (__apply_microcode_amd(mc_amd)) {
		pr_err("CPU%d: update failed for patch_level=0x%08x\n",
			cpu, mc_amd->hdr.patch_id);
		return -1;
	}
	pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
		mc_amd->hdr.patch_id);

	uci->cpu_sig.rev = mc_amd->hdr.patch_id;
	c->microcode = mc_amd->hdr.patch_id;

	return 0;
}

static int install_equiv_cpu_table(const u8 *buf)
{
	unsigned int *ibuf = (unsigned int *)buf;
	unsigned int type = ibuf[1];
	unsigned int size = ibuf[2];

	if (type != UCODE_EQUIV_CPU_TABLE_TYPE || !size) {
		pr_err("empty section/invalid type field in container file section header\n");
		return -EINVAL;
	}

	equiv_cpu_table = vmalloc(size);
	if (!equiv_cpu_table) {
		pr_err("failed to allocate equivalent CPU table\n");
		return -ENOMEM;
	}

	memcpy(equiv_cpu_table, buf + CONTAINER_HDR_SZ, size);

	/* add header length */
	return size + CONTAINER_HDR_SZ;
}

static void free_equiv_cpu_table(void)
{
	vfree(equiv_cpu_table);
	equiv_cpu_table = NULL;
}

static void cleanup(void)
{
	free_equiv_cpu_table();
	free_cache();
}

/*
 * We return the current size even if some of the checks failed so that
 * we can skip over the next patch. If we return a negative value, we
 * signal a grave error like a memory allocation has failed and the
 * driver cannot continue functioning normally. In such cases, we tear
 * down everything we've used up so far and exit.
 */
static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover)
{
	struct microcode_header_amd *mc_hdr;
	struct ucode_patch *patch;
	unsigned int patch_size, crnt_size, ret;
	u32 proc_fam;
	u16 proc_id;

	patch_size  = *(u32 *)(fw + 4);
	crnt_size   = patch_size + SECTION_HDR_SIZE;
	mc_hdr	    = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
	proc_id	    = mc_hdr->processor_rev_id;

	proc_fam = find_cpu_family_by_equiv_cpu(proc_id);
	if (!proc_fam) {
		pr_err("No patch family for equiv ID: 0x%04x\n", proc_id);
		return crnt_size;
	}

	/* check if patch is for the current family */
	proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff);
	if (proc_fam != family)
		return crnt_size;

	if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
		pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n",
			mc_hdr->patch_id);
		return crnt_size;
	}

	ret = verify_patch_size(family, patch_size, leftover);
	if (!ret) {
		pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id);
		return crnt_size;
	}

	patch = kzalloc(sizeof(*patch), GFP_KERNEL);
	if (!patch) {
		pr_err("Patch allocation failure.\n");
		return -EINVAL;
	}

	patch->data = kmemdup(fw + SECTION_HDR_SIZE, patch_size, GFP_KERNEL);
	if (!patch->data) {
		pr_err("Patch data allocation failure.\n");
		kfree(patch);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&patch->plist);
	patch->patch_id  = mc_hdr->patch_id;
	patch->equiv_cpu = proc_id;

	pr_debug("%s: Added patch_id: 0x%08x, proc_id: 0x%04x\n",
		 __func__, patch->patch_id, proc_id);

	/* ... and add to cache. */
	update_cache(patch);

	return crnt_size;
}

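/*
 * Install the equivalence table from @data and add each patch which matches
 * @family to the patch cache.
 */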
static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
					     size_t size)
{
	enum ucode_state ret = UCODE_ERROR;
	unsigned int leftover;
	u8 *fw = (u8 *)data;
	int crnt_size = 0;
	int offset;

	offset = install_equiv_cpu_table(data);
	if (offset < 0) {
		pr_err("failed to create equivalent cpu table\n");
		return ret;
	}
	fw += offset;
	leftover = size - offset;

	if (*(u32 *)fw != UCODE_UCODE_TYPE) {
		pr_err("invalid type field in container file section header\n");
		free_equiv_cpu_table();
		return ret;
	}

	while (leftover) {
		crnt_size = verify_and_add_patch(family, fw, leftover);
		if (crnt_size < 0)
			return ret;

		fw	 += crnt_size;
		leftover -= crnt_size;
	}

	return UCODE_OK;
}

static enum ucode_state
load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
{
	enum ucode_state ret;

	/* free old equiv table */
	free_equiv_cpu_table();

	ret = __load_microcode_amd(family, data, size);

	if (ret != UCODE_OK)
		cleanup();

#ifdef CONFIG_X86_32
	/* save BSP's matching patch for early load */
	if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) {
		struct ucode_patch *p = find_patch(cpu);
		if (p) {
			memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
			memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
							       PATCH_MAX_SIZE));
		}
	}
#endif
	return ret;
}

/*
 * AMD microcode firmware naming convention: up to family 15h they are in
 * the legacy file:
 *
 *    amd-ucode/microcode_amd.bin
 *
 * This legacy file is always smaller than 2K in size.
 *
 * Beginning with family 15h, they are in family-specific firmware files:
 *
 *    amd-ucode/microcode_amd_fam15h.bin
 *    amd-ucode/microcode_amd_fam16h.bin
 *    ...
 *
 * These might be larger than 2K.
 */
static enum ucode_state request_microcode_amd(int cpu, struct device *device,
					      bool refresh_fw)
{
	char fw_name[36] = "amd-ucode/microcode_amd.bin";
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	enum ucode_state ret = UCODE_NFOUND;
	const struct firmware *fw;

	/* reload ucode container only on the boot cpu */
	if (!refresh_fw || c->cpu_index != boot_cpu_data.cpu_index)
		return UCODE_OK;

	if (c->x86 >= 0x15)
		snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);

	if (request_firmware_direct(&fw, (const char *)fw_name, device)) {
		pr_debug("failed to load file %s\n", fw_name);
		goto out;
	}

	ret = UCODE_ERROR;
	if (*(u32 *)fw->data != UCODE_MAGIC) {
		pr_err("invalid magic value (0x%08x)\n", *(u32 *)fw->data);
		goto fw_release;
	}

	ret = load_microcode_amd(cpu, c->x86, fw->data, fw->size);

 fw_release:
	release_firmware(fw);

 out:
	return ret;
}

static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
	return UCODE_ERROR;
}

static void microcode_fini_cpu_amd(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	uci->mc = NULL;
}

static struct microcode_ops microcode_amd_ops = {
	.request_microcode_user           = request_microcode_user,
	.request_microcode_fw             = request_microcode_amd,
	.collect_cpu_info                 = collect_cpu_info_amd,
	.apply_microcode                  = apply_microcode_amd,
	.microcode_fini_cpu               = microcode_fini_cpu_amd,
};

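/*
 * Probe for a supported AMD CPU on behalf of the generic microcode driver
 * core; returns the AMD-specific ops, or NULL on unsupported CPUs.
 */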
struct microcode_ops * __init init_amd_microcode(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
		pr_warn("AMD CPU family 0x%x not supported\n", c->x86);
		return NULL;
	}

	if (ucode_new_rev)
		pr_info_once("microcode updated early to new patch_level=0x%08x\n",
			     ucode_new_rev);

	return &microcode_amd_ops;
}

void __exit exit_amd_microcode(void)
{
	cleanup();
}