/*
 *  AMD CPU Microcode Update Driver for Linux
 *
 *  This driver allows upgrading microcode on AMD F10h and later CPUs.
 *
 *  Copyright (C) 2008-2011 Advanced Micro Devices Inc.
 *	          2013-2016 Borislav Petkov <bp@alien8.de>
 *
 *  Author: Peter Oruba <peter.oruba@amd.com>
 *
 *  Based on work by:
 *  Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
 *
 *  early loader:
 *  Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 *  Author: Jacob Shin <jacob.shin@amd.com>
 *  Fixes: Borislav Petkov <bp@suse.de>
 *
 *  Licensed under the terms of the GNU General Public
 *  License version 2. See file COPYING for details.
 */
#define pr_fmt(fmt) "microcode: " fmt

#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/pci.h>

#include <asm/microcode_amd.h>
#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/cpu.h>
#include <asm/msr.h>

static struct equiv_cpu_entry *equiv_cpu_table;

/*
 * This points to the current valid container of microcode patches which we
 * will save from the initrd/builtin before jettisoning its contents. @mc is
 * the microcode patch we found to match.
 */
static struct cont_desc {
	struct microcode_amd *mc;	/* patch matching this CPU */
	u32		     cpuid_1_eax;	/* CPUID(1).EAX of this CPU */
	u32		     psize;	/* size of the matching patch */
	u16		     eq_id;	/* equivalence ID of this CPU */
	u8		     *data;	/* start of this container */
	size_t		     size;	/* size of this container */
} cont;

static u32 ucode_new_rev;
static u8 amd_ucode_patch[PATCH_MAX_SIZE];

/*
 * Microcode patch container file is prepended to the initrd in cpio
 * format. See Documentation/x86/early-microcode.txt
 */
static const char
ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";
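
/*
 * For orientation, a rough sketch of one container in that file, as the
 * parser below understands it (not an authoritative format description):
 *
 *	u32 UCODE_MAGIC			\
 *	u32 UCODE_EQUIV_CPU_TABLE_TYPE	 } CONTAINER_HDR_SZ
 *	u32 <equivalence table size>	/
 *	struct equiv_cpu_entry[]	<- the equivalence table
 *	u32 UCODE_UCODE_TYPE		\  SECTION_HDR_SIZE
 *	u32 <patch size>		/
 *	struct microcode_amd		<- one patch
 *	... more section header + patch pairs ...
 *
 * Several such containers can be glued together into one blob.
 */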

static u16 find_equiv_id(struct equiv_cpu_entry *equiv_table, u32 sig)
{
	for (; equiv_table && equiv_table->installed_cpu; equiv_table++) {
		if (sig == equiv_table->installed_cpu)
			return equiv_table->equiv_cpu;
	}

	return 0;
}
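
/*
 * For illustration, with a (hypothetical) equivalence table entry
 * { .installed_cpu = 0x00600f12, .equiv_cpu = 0x6012 }, a CPU whose
 * CPUID(1).EAX reads 0x00600f12 resolves to equivalence ID 0x6012, which is
 * then matched against the processor_rev_id of each patch header.
 */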

/*
 * This scans the ucode blob for the proper container as we can have multiple
 * containers glued together.
 *
 * Returns 0 when it finds the container holding a patch matching this CPU,
 * in which case @ucode already points at the proper container. Otherwise,
 * returns the number of bytes consumed while scanning so the caller can
 * advance to the next container in the blob. @desc contains all the data
 * we're going to use in later stages of the application.
 */
static ssize_t parse_container(u8 *ucode, ssize_t size, struct cont_desc *desc)
{
	struct equiv_cpu_entry *eq;
	ssize_t orig_size = size;
	u32 *hdr = (u32 *)ucode;
	u16 eq_id;
	u8 *buf;

	/* Am I looking at an equivalence table header? */
	if (hdr[0] != UCODE_MAGIC ||
	    hdr[1] != UCODE_EQUIV_CPU_TABLE_TYPE ||
	    hdr[2] == 0) {
		desc->eq_id = 0;
		return CONTAINER_HDR_SZ;
	}

	buf = ucode;

	eq = (struct equiv_cpu_entry *)(buf + CONTAINER_HDR_SZ);

	/* Find the equivalence ID of our CPU in this table: */
	eq_id = find_equiv_id(eq, desc->cpuid_1_eax);

	buf  += hdr[2] + CONTAINER_HDR_SZ;
	size -= hdr[2] + CONTAINER_HDR_SZ;

	/*
	 * Scan through the rest of the container to find where it ends. We do
	 * some basic sanity-checking too.
	 */
	while (size > 0) {
		struct microcode_amd *mc;
		u32 patch_size;

		hdr = (u32 *)buf;

		if (hdr[0] != UCODE_UCODE_TYPE)
			break;

		/* Sanity-check patch size. */
		patch_size = hdr[1];
		if (patch_size > PATCH_MAX_SIZE)
			break;

		/* Skip patch section header: */
		buf  += SECTION_HDR_SIZE;
		size -= SECTION_HDR_SIZE;

		mc = (struct microcode_amd *)buf;
		if (eq_id == mc->hdr.processor_rev_id) {
			desc->psize = patch_size;
			desc->mc = mc;
		}

		buf  += patch_size;
		size -= patch_size;
	}

	/*
	 * If we have found a patch (desc->mc), it means we're looking at the
	 * container which has a patch for this CPU so return 0 to mean @ucode
	 * already points to the proper container. Otherwise, we return the size
	 * we scanned so that we can advance to the next container in the
	 * buffer.
	 */
	if (desc->mc) {
		desc->eq_id = eq_id;
		desc->data  = ucode;
		desc->size  = orig_size - size;

		return 0;
	}

	return orig_size - size;
}

/*
 * Scan the ucode blob for the proper container as we can have multiple
 * containers glued together.
 */
static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
{
	ssize_t rem = size;

	while (rem >= 0) {
		ssize_t s = parse_container(ucode, rem, desc);
		if (!s)
			return;

		ucode += s;
		rem   -= s;
	}
}

static int __apply_microcode_amd(struct microcode_amd *mc)
{
	u32 rev, dummy;

	native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc->hdr.data_code);

	/* verify patch application was successful */
	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

	if (rev != mc->hdr.patch_id)
		return -1;

	return 0;
}
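
/*
 * Note on the mechanism above: the patch is applied by writing the address of
 * its data (starting at mc->hdr.data_code) to MSR_AMD64_PATCH_LOADER, and
 * success is verified by reading back, via MSR_AMD64_PATCH_LEVEL, the patch
 * level the core then reports.
 */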

/*
 * Early load occurs before we can vmalloc(). So we look for the microcode
 * patch container file in initrd, traverse the equivalence table, look for a
 * matching microcode patch, and apply the update, all in place in initrd
 * memory. When vmalloc() is available for use later -- on 64-bit during first
 * AP load, and on 32-bit during save_microcode_in_initrd_amd() -- we can call
 * load_microcode_amd() to save the equivalence table and microcode patches in
 * kernel heap memory.
 *
 * Returns true if container found (sets @desc), false otherwise.
 */
static bool
apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size,
			  bool save_patch, struct cont_desc *ret_desc)
{
	struct cont_desc desc = { 0 };
	u8 (*patch)[PATCH_MAX_SIZE];
	struct microcode_amd *mc;
	u32 rev, *new_rev;
	bool ret = false;

#ifdef CONFIG_X86_32
	new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
	patch	= (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
#else
	new_rev = &ucode_new_rev;
	patch	= &amd_ucode_patch;
#endif

	if (check_current_patch_level(&rev, true))
		return false;

	desc.cpuid_1_eax = cpuid_1_eax;

	scan_containers(ucode, size, &desc);
	if (!desc.eq_id)
		return ret;

	mc = desc.mc;
	if (!mc)
		return ret;

	if (rev >= mc->hdr.patch_id)
		return ret;

	if (!__apply_microcode_amd(mc)) {
		*new_rev = mc->hdr.patch_id;
		ret      = true;

		if (save_patch)
			memcpy(patch, mc, min_t(u32, desc.psize, PATCH_MAX_SIZE));
	}

	if (ret_desc)
		*ret_desc = desc;

	return ret;
}

static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
{
#ifdef CONFIG_X86_64
	char fw_name[36] = "amd-ucode/microcode_amd.bin";

	if (family >= 0x15)
		snprintf(fw_name, sizeof(fw_name),
			 "amd-ucode/microcode_amd_fam%.2xh.bin", family);

	return get_builtin_firmware(cp, fw_name);
#else
	return false;
#endif
}

void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
{
	struct ucode_cpu_info *uci;
	struct cpio_data cp;
	const char *path;
	bool use_pa;

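	/*
	 * On 32-bit this runs before paging has been enabled, hence the
	 * __pa_nodebug() conversions below to access global data through its
	 * physical address.
	 */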
	if (IS_ENABLED(CONFIG_X86_32)) {
		uci	= (struct ucode_cpu_info *)__pa_nodebug(ucode_cpu_info);
		path	= (const char *)__pa_nodebug(ucode_path);
		use_pa	= true;
	} else {
		uci     = ucode_cpu_info;
		path	= ucode_path;
		use_pa	= false;
	}

	if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
		cp = find_microcode_in_initrd(path, use_pa);

	if (!(cp.data && cp.size))
		return;

	/* Needed in load_microcode_amd() */
	uci->cpu_sig.sig = cpuid_1_eax;

	apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, true, NULL);
}

#ifdef CONFIG_X86_32
/*
 * On 32-bit, since AP's early load occurs before paging is turned on, we
 * cannot traverse cpu_equiv_table and microcode_cache in kernel heap memory.
 * So during cold boot, each AP applies the patch directly from the initrd,
 * just like the BSP does. In save_microcode_in_initrd_amd(), the BSP's patch
 * is copied to amd_ucode_patch, which is used upon resume from suspend.
 */
void load_ucode_amd_ap(unsigned int cpuid_1_eax)
{
	struct microcode_amd *mc;
	struct cpio_data cp;

	mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
	if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
		__apply_microcode_amd(mc);
		return;
	}

	if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
		cp = find_microcode_in_initrd((const char *)__pa_nodebug(ucode_path), true);

	if (!(cp.data && cp.size))
		return;

	/*
	 * This would set amd_ucode_patch above so that the following APs can
	 * use it directly instead of going down this path again.
	 */
	apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, true, NULL);
}
#else
void load_ucode_amd_ap(unsigned int cpuid_1_eax)
{
	struct equiv_cpu_entry *eq;
	struct microcode_amd *mc;
	u32 rev;
	u16 eq_id;

	/* 64-bit runs with paging enabled, thus early==false. */
	if (check_current_patch_level(&rev, false))
		return;

	/* First AP hasn't cached it yet, go through the blob. */
	if (!cont.data) {
		struct cpio_data cp;

		if (cont.size == -1)
			return;

reget:
		if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax))) {
			cp = find_microcode_in_initrd(ucode_path, false);

			if (!(cp.data && cp.size)) {
				/*
				 * Mark it so that other APs do not scan again
				 * for no real reason and slow down boot
				 * needlessly.
				 */
				cont.size = -1;
				return;
			}
		}

		if (!apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false, &cont)) {
			cont.data = NULL;
			cont.size = -1;
			return;
		}
	}

	eq  = (struct equiv_cpu_entry *)(cont.data + CONTAINER_HDR_SZ);

	eq_id = find_equiv_id(eq, cpuid_1_eax);
	if (!eq_id)
		return;

	if (eq_id == cont.eq_id) {
		mc = (struct microcode_amd *)amd_ucode_patch;

		if (mc && rev < mc->hdr.patch_id) {
			if (!__apply_microcode_amd(mc))
				ucode_new_rev = mc->hdr.patch_id;
		}

	} else {

		/*
		 * AP has a different equivalence ID than BSP, looks like
		 * mixed-steppings silicon so go through the ucode blob anew.
		 */
		goto reget;
	}
}
#endif /* CONFIG_X86_32 */

static enum ucode_state
load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);

int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
{
	enum ucode_state ret;
	int retval = 0;

	if (!cont.data) {
		if (IS_ENABLED(CONFIG_X86_32) && (cont.size != -1)) {
			struct cpio_data cp;

			cp = find_microcode_in_initrd(ucode_path, false);
			if (!(cp.data && cp.size)) {
				cont.size = -1;
				return -EINVAL;
			}

			cont.cpuid_1_eax = cpuid_1_eax;

			scan_containers(cp.data, cp.size, &cont);
			if (!cont.eq_id) {
				cont.size = -1;
				return -EINVAL;
			}
		} else
			return -EINVAL;
	}
	ret = load_microcode_amd(smp_processor_id(), x86_family(cpuid_1_eax), cont.data, cont.size);
	if (ret != UCODE_OK)
		retval = -EINVAL;

	/*
	 * This will be freed any msec now, stash patches for the current
	 * family and switch to patch cache for cpu hotplug, etc later.
	 */
	cont.data = NULL;
	cont.size = 0;

	return retval;
}

void reload_ucode_amd(void)
{
	struct microcode_amd *mc;
	u32 rev;

	/*
	 * early==false because this is a syscore ->resume path and by
	 * that time paging is long enabled.
	 */
	if (check_current_patch_level(&rev, false))
		return;

	mc = (struct microcode_amd *)amd_ucode_patch;
	if (!mc)
		return;

	if (rev < mc->hdr.patch_id) {
		if (!__apply_microcode_amd(mc)) {
			ucode_new_rev = mc->hdr.patch_id;
			pr_info("reload patch_level=0x%08x\n", ucode_new_rev);
		}
	}
}
static u16 __find_equiv_id(unsigned int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	return find_equiv_id(equiv_cpu_table, uci->cpu_sig.sig);
}

static u32 find_cpu_family_by_equiv_cpu(u16 equiv_cpu)
{
	int i = 0;

	BUG_ON(!equiv_cpu_table);

	while (equiv_cpu_table[i].equiv_cpu != 0) {
		if (equiv_cpu == equiv_cpu_table[i].equiv_cpu)
			return equiv_cpu_table[i].installed_cpu;
		i++;
	}
	return 0;
}

/*
 * a small, trivial cache of per-family ucode patches
 */
static struct ucode_patch *cache_find_patch(u16 equiv_cpu)
{
	struct ucode_patch *p;

	list_for_each_entry(p, &microcode_cache, plist)
		if (p->equiv_cpu == equiv_cpu)
			return p;
	return NULL;
}

static void update_cache(struct ucode_patch *new_patch)
{
	struct ucode_patch *p;

	list_for_each_entry(p, &microcode_cache, plist) {
		if (p->equiv_cpu == new_patch->equiv_cpu) {
			if (p->patch_id >= new_patch->patch_id)
				/* we already have the latest patch */
				return;

			list_replace(&p->plist, &new_patch->plist);
			kfree(p->data);
			kfree(p);
			return;
		}
	}
	/* no patch found, add it */
	list_add_tail(&new_patch->plist, &microcode_cache);
}

static void free_cache(void)
{
	struct ucode_patch *p, *tmp;

	list_for_each_entry_safe(p, tmp, &microcode_cache, plist) {
		__list_del(p->plist.prev, p->plist.next);
		kfree(p->data);
		kfree(p);
	}
}

static struct ucode_patch *find_patch(unsigned int cpu)
{
	u16 equiv_id;

	equiv_id = __find_equiv_id(cpu);
	if (!equiv_id)
		return NULL;

	return cache_find_patch(equiv_id);
}

static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct ucode_patch *p;

	csig->sig = cpuid_eax(0x00000001);
	csig->rev = c->microcode;

	/*
	 * a patch could have been loaded early, set uci->mc so that
	 * mc_bp_resume() can call apply_microcode()
	 */
	p = find_patch(cpu);
	if (p && (p->patch_id == csig->rev))
		uci->mc = p->data;

	pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);

	return 0;
}

static unsigned int verify_patch_size(u8 family, u32 patch_size,
				      unsigned int size)
{
	u32 max_size;

#define F1XH_MPB_MAX_SIZE 2048
#define F14H_MPB_MAX_SIZE 1824
#define F15H_MPB_MAX_SIZE 4096
#define F16H_MPB_MAX_SIZE 3458

	switch (family) {
	case 0x14:
		max_size = F14H_MPB_MAX_SIZE;
		break;
	case 0x15:
		max_size = F15H_MPB_MAX_SIZE;
		break;
	case 0x16:
		max_size = F16H_MPB_MAX_SIZE;
		break;
	default:
		max_size = F1XH_MPB_MAX_SIZE;
		break;
	}

	if (patch_size > min_t(u32, size, max_size)) {
		pr_err("patch size mismatch\n");
		return 0;
	}

	return patch_size;
}
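
/*
 * Example with hypothetical numbers: on family 0x15, a section header
 * claiming a 16384-byte patch exceeds F15H_MPB_MAX_SIZE (4096), so
 * verify_patch_size() returns 0 and the caller skips that patch.
 */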

/*
 * Those patch levels cannot be updated to newer ones and thus should be final.
 */
static u32 final_levels[] = {
	0x01000098,
	0x0100009f,
	0x010000af,
	0, /* T-101 terminator */
};

/*
 * Check the current patch level on this CPU.
 *
 * @rev: Use it to return the patch level. It is set to 0 in the case of
 * error.
 *
 * Returns:
 *  - true: if update should stop
 *  - false: otherwise
 */
bool check_current_patch_level(u32 *rev, bool early)
{
	u32 lvl, dummy, i;
	bool ret = false;
	u32 *levels;

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);

	if (IS_ENABLED(CONFIG_X86_32) && early)
		levels = (u32 *)__pa_nodebug(&final_levels);
	else
		levels = final_levels;

	for (i = 0; levels[i]; i++) {
		if (lvl == levels[i]) {
			lvl = 0;
			ret = true;
			break;
		}
	}
	if (rev)
		*rev = lvl;
	return ret;
}

static int apply_microcode_amd(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct microcode_amd *mc_amd;
	struct ucode_cpu_info *uci;
	struct ucode_patch *p;
	u32 rev;

	BUG_ON(raw_smp_processor_id() != cpu);

	uci = ucode_cpu_info + cpu;
	p = find_patch(cpu);
	if (!p)
		return 0;

	mc_amd  = p->data;
	uci->mc = p->data;

	if (check_current_patch_level(&rev, false))
		return -1;

	/* need to apply patch? */
	if (rev >= mc_amd->hdr.patch_id) {
		c->microcode = rev;
		uci->cpu_sig.rev = rev;
		return 0;
	}

	if (__apply_microcode_amd(mc_amd)) {
		pr_err("CPU%d: update failed for patch_level=0x%08x\n",
			cpu, mc_amd->hdr.patch_id);
		return -1;
	}
	pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
		mc_amd->hdr.patch_id);
	uci->cpu_sig.rev = mc_amd->hdr.patch_id;
	c->microcode = mc_amd->hdr.patch_id;

	return 0;
}

static int install_equiv_cpu_table(const u8 *buf)
{
	unsigned int *ibuf = (unsigned int *)buf;
	unsigned int type = ibuf[1];
	unsigned int size = ibuf[2];

	if (type != UCODE_EQUIV_CPU_TABLE_TYPE || !size) {
		pr_err("empty section/invalid type field in container file section header\n");
		return -EINVAL;
	}

	equiv_cpu_table = vmalloc(size);
	if (!equiv_cpu_table) {
		pr_err("failed to allocate equivalent CPU table\n");
		return -ENOMEM;
	}

	memcpy(equiv_cpu_table, buf + CONTAINER_HDR_SZ, size);
	/* add header length */
	return size + CONTAINER_HDR_SZ;
}

static void free_equiv_cpu_table(void)
{
	vfree(equiv_cpu_table);
	equiv_cpu_table = NULL;
}
static void cleanup(void)
{
	free_equiv_cpu_table();
	free_cache();
}

/*
 * We return the current size even if some of the checks failed so that
 * we can skip over the next patch. If we return a negative value, we
 * signal a grave error such as a failed memory allocation, after which the
 * driver cannot continue functioning normally. In such cases, we tear
 * down everything we've used up so far and exit.
 */
static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover)
{
	struct microcode_header_amd *mc_hdr;
	struct ucode_patch *patch;
	unsigned int patch_size, crnt_size, ret;
	u32 proc_fam;
	u16 proc_id;

	patch_size  = *(u32 *)(fw + 4);
	crnt_size   = patch_size + SECTION_HDR_SIZE;
	mc_hdr	    = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
	proc_id	    = mc_hdr->processor_rev_id;

	proc_fam = find_cpu_family_by_equiv_cpu(proc_id);
	if (!proc_fam) {
		pr_err("No patch family for equiv ID: 0x%04x\n", proc_id);
		return crnt_size;
	}

	/*
	 * Check if the patch is for the current family: derive the family
	 * from the CPUID signature as base family plus extended family.
	 */
	proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff);
	if (proc_fam != family)
		return crnt_size;

	if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
		pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n",
			mc_hdr->patch_id);
		return crnt_size;
	}

	ret = verify_patch_size(family, patch_size, leftover);
	if (!ret) {
		pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id);
		return crnt_size;
	}

	patch = kzalloc(sizeof(*patch), GFP_KERNEL);
	if (!patch) {
		pr_err("Patch allocation failure.\n");
		return -EINVAL;
	}

	patch->data = kmemdup(fw + SECTION_HDR_SIZE, patch_size, GFP_KERNEL);
	if (!patch->data) {
		pr_err("Patch data allocation failure.\n");
		kfree(patch);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&patch->plist);
	patch->patch_id  = mc_hdr->patch_id;
	patch->equiv_cpu = proc_id;

	pr_debug("%s: Added patch_id: 0x%08x, proc_id: 0x%04x\n",
		 __func__, patch->patch_id, proc_id);

	/* ... and add to cache. */
	update_cache(patch);

	return crnt_size;
}

static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
					     size_t size)
{
	enum ucode_state ret = UCODE_ERROR;
	unsigned int leftover;
	u8 *fw = (u8 *)data;
	int crnt_size = 0;
	int offset;

	offset = install_equiv_cpu_table(data);
	if (offset < 0) {
		pr_err("failed to create equivalent cpu table\n");
		return ret;
	}
	fw += offset;
	leftover = size - offset;

	if (*(u32 *)fw != UCODE_UCODE_TYPE) {
		pr_err("invalid type field in container file section header\n");
		free_equiv_cpu_table();
		return ret;
	}

	while (leftover) {
		crnt_size = verify_and_add_patch(family, fw, leftover);
		if (crnt_size < 0)
			return ret;
		fw	 += crnt_size;
		leftover -= crnt_size;
	}
	return UCODE_OK;
}

static enum ucode_state
load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
{
	enum ucode_state ret;

	/* free old equiv table */
	free_equiv_cpu_table();

	ret = __load_microcode_amd(family, data, size);

	if (ret != UCODE_OK)
		cleanup();

#ifdef CONFIG_X86_32
	/* save BSP's matching patch for early load */
	if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) {
		struct ucode_patch *p = find_patch(cpu);
		if (p) {
			memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
			memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
							       PATCH_MAX_SIZE));
		}
	}
#endif
	return ret;
}

/*
 * AMD microcode firmware naming convention, up to family 15h they are in
 * the legacy file:
 *
 *    amd-ucode/microcode_amd.bin
 *
 * This legacy file is always smaller than 2K in size.
 *
 * Beginning with family 15h, they are in family-specific firmware files:
 *
 *    amd-ucode/microcode_amd_fam15h.bin
 *    amd-ucode/microcode_amd_fam16h.bin
 *    ...
 *
 * These might be larger than 2K.
 */
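/*
 * E.g., a family 0x17 CPU would be mapped to
 * "amd-ucode/microcode_amd_fam17h.bin" by the snprintf() below.
 */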
static enum ucode_state request_microcode_amd(int cpu, struct device *device,
					      bool refresh_fw)
{
	char fw_name[36] = "amd-ucode/microcode_amd.bin";
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	enum ucode_state ret = UCODE_NFOUND;
	const struct firmware *fw;

	/* reload ucode container only on the boot cpu */
	if (!refresh_fw || c->cpu_index != boot_cpu_data.cpu_index)
		return UCODE_OK;

	if (c->x86 >= 0x15)
		snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);

	if (request_firmware_direct(&fw, (const char *)fw_name, device)) {
		pr_debug("failed to load file %s\n", fw_name);
		goto out;
	}

	ret = UCODE_ERROR;
	if (*(u32 *)fw->data != UCODE_MAGIC) {
		pr_err("invalid magic value (0x%08x)\n", *(u32 *)fw->data);
		goto fw_release;
	}

	ret = load_microcode_amd(cpu, c->x86, fw->data, fw->size);

 fw_release:
	release_firmware(fw);

 out:
	return ret;
}

static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
	return UCODE_ERROR;
}

static void microcode_fini_cpu_amd(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	uci->mc = NULL;
}

static struct microcode_ops microcode_amd_ops = {
	.request_microcode_user           = request_microcode_user,
	.request_microcode_fw             = request_microcode_amd,
	.collect_cpu_info                 = collect_cpu_info_amd,
	.apply_microcode                  = apply_microcode_amd,
	.microcode_fini_cpu               = microcode_fini_cpu_amd,
};

struct microcode_ops * __init init_amd_microcode(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
		pr_warn("AMD CPU family 0x%x not supported\n", c->x86);
		return NULL;
	}

	if (ucode_new_rev)
		pr_info_once("microcode updated early to new patch_level=0x%08x\n",
			     ucode_new_rev);

	return &microcode_amd_ops;
}

void __exit exit_amd_microcode(void)
{
	cleanup();
}