intel.c 23.4 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0-or-later
L
Linus Torvalds 已提交
2
/*
3
 * Intel CPU Microcode Update Driver for Linux
L
Linus Torvalds 已提交
4
 *
A
Andrew Morton 已提交
5
 * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
6
 *		 2006 Shaohua Li <shaohua.li@intel.com>
L
Linus Torvalds 已提交
7
 *
8 9 10 11
 * Intel CPU microcode early update for Linux
 *
 * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
 *		      H Peter Anvin" <hpa@zytor.com>
L
Linus Torvalds 已提交
12
 */
13

14 15 16 17 18 19
/*
 * This needs to be before all headers so that pr_debug in printk.h doesn't turn
 * printk calls into no_printk().
 *
 *#define DEBUG
 */
20
#define pr_fmt(fmt) "microcode: " fmt
21

22
#include <linux/earlycpio.h>
I
Ingo Molnar 已提交
23 24
#include <linux/firmware.h>
#include <linux/uaccess.h>
25 26
#include <linux/vmalloc.h>
#include <linux/initrd.h>
I
Ingo Molnar 已提交
27
#include <linux/kernel.h>
28 29
#include <linux/slab.h>
#include <linux/cpu.h>
30
#include <linux/uio.h>
31
#include <linux/mm.h>
L
Linus Torvalds 已提交
32

33
#include <asm/microcode_intel.h>
34
#include <asm/intel-family.h>
I
Ingo Molnar 已提交
35
#include <asm/processor.h>
36 37
#include <asm/tlbflush.h>
#include <asm/setup.h>
I
Ingo Molnar 已提交
38
#include <asm/msr.h>
L
Linus Torvalds 已提交
39

40
static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
41

42
/* Current microcode patch used in early patching on the APs. */
43
static struct microcode_intel *intel_ucode_patch;
44

45 46 47
/* last level cache size per core */
static int llc_size_per_core;

48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102
/*
 * Check whether a CPU (signature, processor-flags) pair matches a
 * microcode (signature, processor-flags) pair.
 *
 * The signatures must be identical. The flag words match when both are
 * zero, or when they share at least one set bit.
 */
static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
					unsigned int s2, unsigned int p2)
{
	if (s1 != s2)
		return false;

	/* Both flag words empty counts as a match ... */
	if (!(p1 | p2))
		return true;

	/* ... otherwise a common bit is required. */
	return (p1 & p2) != 0;
}

/*
 * Check whether microcode blob @mc is applicable to a CPU with signature
 * @csig and processor flags @cpf, consulting both the primary header and
 * any extended signature table.
 *
 * Returns 1 if update has been found, 0 otherwise.
 */
static int find_matching_signature(void *mc, unsigned int csig, int cpf)
{
	struct microcode_header_intel *mc_hdr = mc;
	struct extended_sigtable *ext_hdr;
	struct extended_signature *ext_sig;
	int i;

	/* Fast path: the primary header signature matches the CPU. */
	if (cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf))
		return 1;

	/* Look for ext. headers: */
	if (get_totalsize(mc_hdr) <= get_datasize(mc_hdr) + MC_HEADER_SIZE)
		return 0;

	ext_hdr = mc + get_datasize(mc_hdr) + MC_HEADER_SIZE;
	ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE;

	/* Walk the extended signature table looking for a match. */
	for (i = 0; i < ext_hdr->count; i++) {
		if (cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf))
			return 1;
		ext_sig++;
	}
	return 0;
}

/*
 * Check whether @mc both matches (@csig, @cpf) and carries a revision
 * strictly newer than @new_rev.
 *
 * Returns 1 if update has been found, 0 otherwise.
 */
static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev)
{
	struct microcode_header_intel *mc_hdr = mc;

	if (mc_hdr->rev > new_rev)
		return find_matching_signature(mc, csig, cpf);

	return 0;
}

103 104 105
/*
 * Given CPU signature and a microcode patch, this function finds if the
 * microcode patch has matching family and model with the CPU.
 *
 * %true - if there's a match
 * %false - otherwise
 */
static bool microcode_matches(struct microcode_header_intel *mc_header,
			      unsigned long sig)
{
	unsigned long total_size = get_totalsize(mc_header);
	unsigned long data_size = get_datasize(mc_header);
	struct extended_sigtable *ext_header;
	unsigned int fam_ucode, model_ucode;
	struct extended_signature *ext_sig;
	unsigned int fam, model;
	int ext_sigcount, i;

	/* Family/model of the CPU we are checking against. */
	fam   = x86_family(sig);
	model = x86_model(sig);

	/* Family/model encoded in the patch's primary header. */
	fam_ucode   = x86_family(mc_header->sig);
	model_ucode = x86_model(mc_header->sig);

	if (fam == fam_ucode && model == model_ucode)
		return true;

	/* Look for ext. headers: */
	if (total_size <= data_size + MC_HEADER_SIZE)
		return false;

	ext_header   = (void *) mc_header + data_size + MC_HEADER_SIZE;
	ext_sig      = (void *)ext_header + EXT_HEADER_SIZE;
	ext_sigcount = ext_header->count;

	/* Check every entry in the extended signature table too. */
	for (i = 0; i < ext_sigcount; i++) {
		fam_ucode   = x86_family(ext_sig->sig);
		model_ucode = x86_model(ext_sig->sig);

		if (fam == fam_ucode && model == model_ucode)
			return true;

		ext_sig++;
	}
	return false;
}

150
static struct ucode_patch *memdup_patch(void *data, unsigned int size)
151
{
152
	struct ucode_patch *p;
153

154
	p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL);
155
	if (!p)
156
		return NULL;
157

158 159 160
	p->data = kmemdup(data, size, GFP_KERNEL);
	if (!p->data) {
		kfree(p);
161
		return NULL;
162 163
	}

164
	return p;
165 166
}

167
/*
 * Insert the patch at @data (@size bytes) into the microcode cache list,
 * replacing any older cached patch with a matching signature. Also points
 * intel_ucode_patch at the cached copy for early AP loading.
 */
static void save_microcode_patch(void *data, unsigned int size)
{
	struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
	struct ucode_patch *iter, *tmp, *p = NULL;
	bool prev_found = false;
	unsigned int sig, pf;

	mc_hdr = (struct microcode_header_intel *)data;

	/* _safe variant: entries may be replaced (freed) during the walk. */
	list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
		mc_saved_hdr = (struct microcode_header_intel *)iter->data;
		sig	     = mc_saved_hdr->sig;
		pf	     = mc_saved_hdr->pf;

		if (find_matching_signature(data, sig, pf)) {
			prev_found = true;

			/* Cached patch is already at least as new; keep it. */
			if (mc_hdr->rev <= mc_saved_hdr->rev)
				continue;

			p = memdup_patch(data, size);
			if (!p)
				pr_err("Error allocating buffer %p\n", data);
			else {
				/* Swap the new copy in and free the old one. */
				list_replace(&iter->plist, &p->plist);
				kfree(iter->data);
				kfree(iter);
			}
		}
	}

	/*
	 * There weren't any previous patches found in the list cache; save the
	 * newly found.
	 */
	if (!prev_found) {
		p = memdup_patch(data, size);
		if (!p)
			pr_err("Error allocating buffer for %p\n", data);
		else
			list_add_tail(&p->plist, &microcode_cache);
	}

	/* Nothing was cached (allocation failed or patch was older). */
	if (!p)
		return;

	/*
	 * Save for early loading. On 32-bit, that needs to be a physical
	 * address as the APs are running from physical addresses, before
	 * paging has been enabled.
	 */
	if (IS_ENABLED(CONFIG_X86_32))
		intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data);
	else
		intel_ucode_patch = p->data;
}

224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321
/*
 * Validate a candidate microcode blob: header/loader versions, size
 * consistency, extended signature table layout, and all checksums.
 *
 * @mc:        pointer to the microcode update image
 * @print_err: non-zero to log the reason for a failed check
 *
 * Returns 0 when the image is well-formed, a negative errno otherwise.
 */
static int microcode_sanity_check(void *mc, int print_err)
{
	unsigned long total_size, data_size, ext_table_size;
	struct microcode_header_intel *mc_header = mc;
	struct extended_sigtable *ext_header = NULL;
	u32 sum, orig_sum, ext_sigcount = 0, i;
	struct extended_signature *ext_sig;

	total_size = get_totalsize(mc_header);
	data_size = get_datasize(mc_header);

	if (data_size + MC_HEADER_SIZE > total_size) {
		if (print_err)
			pr_err("Error: bad microcode data file size.\n");
		return -EINVAL;
	}

	if (mc_header->ldrver != 1 || mc_header->hdrver != 1) {
		if (print_err)
			pr_err("Error: invalid/unknown microcode update format.\n");
		return -EINVAL;
	}

	/* Anything beyond header + data is the extended signature table. */
	ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
	if (ext_table_size) {
		u32 ext_table_sum = 0;
		u32 *ext_tablep;

		if ((ext_table_size < EXT_HEADER_SIZE)
		 || ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) {
			if (print_err)
				pr_err("Error: truncated extended signature table.\n");
			return -EINVAL;
		}

		ext_header = mc + MC_HEADER_SIZE + data_size;
		if (ext_table_size != exttable_size(ext_header)) {
			if (print_err)
				pr_err("Error: extended signature table size mismatch.\n");
			return -EFAULT;
		}

		ext_sigcount = ext_header->count;

		/*
		 * Check extended table checksum: the sum of all dwords that
		 * comprise a valid table must be 0.
		 */
		ext_tablep = (u32 *)ext_header;

		i = ext_table_size / sizeof(u32);
		while (i--)
			ext_table_sum += ext_tablep[i];

		if (ext_table_sum) {
			if (print_err)
				pr_warn("Bad extended signature table checksum, aborting.\n");
			return -EINVAL;
		}
	}

	/*
	 * Calculate the checksum of update data and header. The checksum of
	 * valid update data and header including the extended signature table
	 * must be 0.
	 */
	orig_sum = 0;
	i = (MC_HEADER_SIZE + data_size) / sizeof(u32);
	while (i--)
		orig_sum += ((u32 *)mc)[i];

	if (orig_sum) {
		if (print_err)
			pr_err("Bad microcode data checksum, aborting.\n");
		return -EINVAL;
	}

	if (!ext_table_size)
		return 0;

	/*
	 * Check extended signature checksum: 0 => valid.
	 */
	for (i = 0; i < ext_sigcount; i++) {
		ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
			  EXT_SIGNATURE_SIZE * i;

		sum = (mc_header->sig + mc_header->pf + mc_header->cksum) -
		      (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
		if (sum) {
			if (print_err)
				pr_err("Bad extended signature checksum, aborting.\n");
			return -EINVAL;
		}
	}
	return 0;
}

322 323 324 325
/*
 * Get microcode matching with BSP's model. Only CPUs with the same model as
 * BSP can stay in the platform.
 *
 * Walks the concatenated patches in @data (@size bytes total). When @save is
 * true, matching patches are stored into the cache; otherwise the newest
 * applicable patch is returned (NULL if none, or if the blob is malformed).
 */
static struct microcode_intel *
scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save)
{
	struct microcode_header_intel *mc_header;
	struct microcode_intel *patch = NULL;
	unsigned int mc_size;

	while (size) {
		if (size < sizeof(struct microcode_header_intel))
			break;

		mc_header = (struct microcode_header_intel *)data;

		mc_size = get_totalsize(mc_header);
		if (!mc_size ||
		    mc_size > size ||
		    microcode_sanity_check(data, 0) < 0)
			break;

		size -= mc_size;

		/* Skip patches for other family/model combinations. */
		if (!microcode_matches(mc_header, uci->cpu_sig.sig)) {
			data += mc_size;
			continue;
		}

		/* Saving mode: cache every matching patch and move on. */
		if (save) {
			save_microcode_patch(data, mc_size);
			goto next;
		}


		if (!patch) {
			/* First candidate must beat the running revision. */
			if (!has_newer_microcode(data,
						 uci->cpu_sig.sig,
						 uci->cpu_sig.pf,
						 uci->cpu_sig.rev))
				goto next;

		} else {
			/* Subsequent candidates must beat the current best. */
			struct microcode_header_intel *phdr = &patch->hdr;

			if (!has_newer_microcode(data,
						 phdr->sig,
						 phdr->pf,
						 phdr->rev))
				goto next;
		}

		/* We have a newer patch, save it. */
		patch = data;

next:
		data += mc_size;
	}

	/* Leftover bytes mean the blob was truncated or corrupt. */
	if (size)
		return NULL;

	return patch;
}

/*
 * Fill @uci with this CPU's signature, platform flags and current microcode
 * revision, using only native CPUID/MSR accessors (usable before paravirt
 * and during early boot). Always returns 0.
 */
static int collect_cpu_info_early(struct ucode_cpu_info *uci)
{
	unsigned int val[2];
	unsigned int family, model;
	struct cpu_signature csig = { 0 };
	unsigned int eax, ebx, ecx, edx;

	memset(uci, 0, sizeof(*uci));

	/* CPUID leaf 1: EAX holds the CPU signature. */
	eax = 0x00000001;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	csig.sig = eax;

	family = x86_family(eax);
	model  = x86_model(eax);

	if ((model >= 5) || (family > 6)) {
		/* get processor flags from MSR 0x17 */
		native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
		csig.pf = 1 << ((val[1] >> 18) & 7);
	}

	csig.rev = intel_get_microcode_revision();

	uci->cpu_sig = csig;
	uci->valid = 1;

	return 0;
}

/*
 * Dump the contents of the microcode cache via pr_debug(). Compiled to a
 * no-op unless DEBUG is defined (see the note at the top of this file).
 */
static void show_saved_mc(void)
{
#ifdef DEBUG
	int i = 0, j;
	unsigned int sig, pf, rev, total_size, data_size, date;
	struct ucode_cpu_info uci;
	struct ucode_patch *p;

	if (list_empty(&microcode_cache)) {
		pr_debug("no microcode data saved.\n");
		return;
	}

	collect_cpu_info_early(&uci);

	/* First line: the current CPU's own signature info. */
	sig	= uci.cpu_sig.sig;
	pf	= uci.cpu_sig.pf;
	rev	= uci.cpu_sig.rev;
	pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);

	list_for_each_entry(p, &microcode_cache, plist) {
		struct microcode_header_intel *mc_saved_header;
		struct extended_sigtable *ext_header;
		struct extended_signature *ext_sig;
		int ext_sigcount;

		mc_saved_header = (struct microcode_header_intel *)p->data;

		sig	= mc_saved_header->sig;
		pf	= mc_saved_header->pf;
		rev	= mc_saved_header->rev;
		date	= mc_saved_header->date;

		total_size	= get_totalsize(mc_saved_header);
		data_size	= get_datasize(mc_saved_header);

		/* Date is packed as mmddyyyy in BCD-like hex fields. */
		pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
			 i++, sig, pf, rev, total_size,
			 date & 0xffff,
			 date >> 24,
			 (date >> 16) & 0xff);

		/* Look for ext. headers: */
		if (total_size <= data_size + MC_HEADER_SIZE)
			continue;

		ext_header = (void *)mc_saved_header + data_size + MC_HEADER_SIZE;
		ext_sigcount = ext_header->count;
		ext_sig = (void *)ext_header + EXT_HEADER_SIZE;

		for (j = 0; j < ext_sigcount; j++) {
			sig = ext_sig->sig;
			pf = ext_sig->pf;

			pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n",
				 j, sig, pf);

			ext_sig++;
		}
	}
#endif
}
481 482

/*
 * Save this microcode patch. It will be loaded early when a CPU is
 * hot-added or resumes.
 *
 * @mc:   patch image to cache
 * @size: size of @mc in bytes
 */
static void save_mc_for_early(u8 *mc, unsigned int size)
{
	/* Synchronization during CPU hotplug. */
	static DEFINE_MUTEX(x86_cpu_microcode_mutex);

	mutex_lock(&x86_cpu_microcode_mutex);

	save_microcode_patch(mc, size);
	show_saved_mc();

	mutex_unlock(&x86_cpu_microcode_mutex);
}
498

499
static bool load_builtin_intel_microcode(struct cpio_data *cp)
500
{
501
	unsigned int eax = 1, ebx, ecx = 0, edx;
502 503
	char name[30];

504 505 506
	if (IS_ENABLED(CONFIG_X86_32))
		return false;

507 508
	native_cpuid(&eax, &ebx, &ecx, &edx);

509 510
	sprintf(name, "intel-ucode/%02x-%02x-%02x",
		      x86_family(eax), x86_model(eax), x86_stepping(eax));
511 512 513 514 515 516 517 518 519 520

	return get_builtin_firmware(cp, name);
}

/*
 * Print ucode update info.
 */
static void
print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
{
521 522 523 524 525
	pr_info_once("microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
		     uci->cpu_sig.rev,
		     date & 0xffff,
		     date >> 24,
		     (date >> 16) & 0xff);
526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547
}

#ifdef CONFIG_X86_32

/* Set when an early update happened before printk was usable. */
static int delay_ucode_info;
/* Date of the patch applied early, for the delayed printout. */
static int current_mc_date;

/*
 * Print early updated ucode info after printk works. This is delayed info dump.
 */
void show_ucode_info_early(void)
{
	struct ucode_cpu_info uci;

	if (delay_ucode_info) {
		collect_cpu_info_early(&uci);
		print_ucode_info(&uci, current_mc_date);
		delay_ucode_info = 0;
	}
}

/*
 * At this point, we can not call printk() yet. Delay printing microcode info in
 * show_ucode_info_early() until printk() works.
 */
static void print_ucode(struct ucode_cpu_info *uci)
{
	struct microcode_intel *mc;
	int *delay_ucode_info_p;
	int *current_mc_date_p;

	mc = uci->mc;
	if (!mc)
		return;

	/*
	 * Write through physical addresses: this runs before paging is
	 * enabled, so the kernel's virtual addresses are not usable yet.
	 */
	delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
	current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);

	*delay_ucode_info_p = 1;
	*current_mc_date_p = mc->hdr.date;
}
#else

/* 64-bit runs with paging enabled here, so printing directly is fine. */
static inline void print_ucode(struct ucode_cpu_info *uci)
{
	struct microcode_intel *mc;

	mc = uci->mc;
	if (!mc)
		return;

	print_ucode_info(uci, mc->hdr.date);
}
#endif

/*
 * Apply the patch in @uci->mc to this CPU via the microcode-update MSR.
 * @early selects the pre-printk reporting path.
 *
 * NOTE(review): the return values mix plain 0/-1 with the UCODE_OK enum
 * value — callers only test for non-zero, but worth confirming/unifying.
 */
static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
{
	struct microcode_intel *mc;
	u32 rev;

	mc = uci->mc;
	if (!mc)
		return 0;

	/*
	 * Save us the MSR write below - which is a particular expensive
	 * operation - when the other hyperthread has updated the microcode
	 * already.
	 */
	rev = intel_get_microcode_revision();
	if (rev >= mc->hdr.rev) {
		uci->cpu_sig.rev = rev;
		return UCODE_OK;
	}

	/*
	 * Writeback and invalidate caches before updating microcode to avoid
	 * internal issues depending on what the microcode is updating.
	 */
	native_wbinvd();

	/* write microcode via MSR 0x79 */
	native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);

	/* Re-read the revision to verify the update actually took. */
	rev = intel_get_microcode_revision();
	if (rev != mc->hdr.rev)
		return -1;

	uci->cpu_sig.rev = rev;

	if (early)
		print_ucode(uci);
	else
		print_ucode_info(uci, mc->hdr.date);

	return 0;
}

/*
 * Called before the initrd is jettisoned: rescan it (or the built-in blob)
 * one final time and cache any applicable patch at a stable address for
 * later CPU hotplug/resume. Always returns 0.
 */
int __init save_microcode_in_initrd_intel(void)
{
	struct ucode_cpu_info uci;
	struct cpio_data cp;

	/*
	 * initrd is going away, clear patch ptr. We will scan the microcode one
	 * last time before jettisoning and save a patch, if found. Then we will
	 * update that pointer too, with a stable patch address to use when
	 * resuming the cores.
	 */
	intel_ucode_patch = NULL;

	/* Built-in microcode takes precedence over the initrd copy. */
	if (!load_builtin_intel_microcode(&cp))
		cp = find_microcode_in_initrd(ucode_path, false);

	if (!(cp.data && cp.size))
		return 0;

	collect_cpu_info_early(&uci);

	/* save=true: matching patches go into the cache list. */
	scan_microcode(cp.data, cp.size, &uci, true);

	show_saved_mc();

	return 0;
}
651

652 653 654 655 656 657 658 659
/*
 * Locate a microcode patch for the current CPU, from built-in firmware or
 * the initrd, and collect the CPU's signature into @uci along the way.
 *
 * Returns a pointer to the patch we found, or NULL.
 */
static struct microcode_intel *__load_ucode_intel(struct ucode_cpu_info *uci)
{
	static const char *path;
	struct cpio_data cp;
	bool use_pa;

	/*
	 * On 32-bit this runs before paging is up, so the path string must
	 * be referenced through its physical address.
	 */
	if (IS_ENABLED(CONFIG_X86_32)) {
		path	  = (const char *)__pa_nodebug(ucode_path);
		use_pa	  = true;
	} else {
		path	  = ucode_path;
		use_pa	  = false;
	}

	/* try built-in microcode first */
	if (!load_builtin_intel_microcode(&cp))
		cp = find_microcode_in_initrd(path, use_pa);

	if (!(cp.data && cp.size))
		return NULL;

	collect_cpu_info_early(uci);

	return scan_microcode(cp.data, cp.size, uci, false);
}

681
/*
 * Early microcode load on the boot CPU: find a matching patch and apply
 * it. Silently does nothing when no patch is available.
 */
void __init load_ucode_intel_bsp(void)
{
	struct ucode_cpu_info uci;
	struct microcode_intel *patch = __load_ucode_intel(&uci);

	if (!patch)
		return;

	uci.mc = patch;
	apply_microcode_early(&uci, true);
}

695
/*
 * Early microcode load on an application processor. Uses the patch pointer
 * cached by the BSP; if applying it fails (e.g. mixed silicon), the cache
 * is dropped and the sources are rescanned for a suitable patch.
 */
void load_ucode_intel_ap(void)
{
	struct microcode_intel *patch, **iup;
	struct ucode_cpu_info uci;

	/*
	 * 32-bit APs run from physical addresses here (paging off), so the
	 * patch pointer must be accessed through its physical address.
	 */
	if (IS_ENABLED(CONFIG_X86_32))
		iup = (struct microcode_intel **) __pa_nodebug(&intel_ucode_patch);
	else
		iup = &intel_ucode_patch;

reget:
	if (!*iup) {
		patch = __load_ucode_intel(&uci);
		if (!patch)
			return;

		*iup = patch;
	}

	uci.mc = *iup;

	if (apply_microcode_early(&uci, true)) {
		/* Mixed-silicon system? Try to refetch the proper patch: */
		*iup = NULL;

		goto reget;
	}
}

724
static struct microcode_intel *find_patch(struct ucode_cpu_info *uci)
725
{
726 727
	struct microcode_header_intel *phdr;
	struct ucode_patch *iter, *tmp;
728

729
	list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
730

731
		phdr = (struct microcode_header_intel *)iter->data;
732

733 734
		if (phdr->rev <= uci->cpu_sig.rev)
			continue;
735

736 737 738 739
		if (!find_matching_signature(phdr,
					     uci->cpu_sig.sig,
					     uci->cpu_sig.pf))
			continue;
740

741 742 743
		return iter->data;
	}
	return NULL;
744 745 746 747
}

void reload_ucode_intel(void)
{
748
	struct microcode_intel *p;
749 750 751 752
	struct ucode_cpu_info uci;

	collect_cpu_info_early(&uci);

753 754
	p = find_patch(&uci);
	if (!p)
755 756
		return;

757 758
	uci.mc = p;

759 760 761
	apply_microcode_early(&uci, false);
}

762
/*
 * microcode_ops callback: fill @csig with CPU @cpu_num's signature,
 * platform flags and current microcode revision. Logs the values once
 * per distinct signature. Always returns 0.
 */
static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
{
	static struct cpu_signature prev;
	struct cpuinfo_x86 *c = &cpu_data(cpu_num);
	unsigned int val[2];

	memset(csig, 0, sizeof(*csig));

	csig->sig = cpuid_eax(0x00000001);

	if ((c->x86_model >= 5) || (c->x86 > 6)) {
		/* get processor flags from MSR 0x17 */
		rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
		csig->pf = 1 << ((val[1] >> 18) & 7);
	}

	csig->rev = c->microcode;

	/* No extra locking on prev, races are harmless. */
	if (csig->sig != prev.sig || csig->pf != prev.pf || csig->rev != prev.rev) {
		pr_info("sig=0x%x, pf=0x%x, revision=0x%x\n",
			csig->sig, csig->pf, csig->rev);
		prev = *csig;
	}

	return 0;
}

790
/*
 * microcode_ops callback: apply the best available patch to @cpu (which
 * must be the CPU this code is running on). Updates the per-CPU and, on
 * the BSP, the global cached revision on success.
 */
static enum ucode_state apply_microcode_intel(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
	struct microcode_intel *mc;
	enum ucode_state ret;
	static int prev_rev;
	u32 rev;

	/* We should bind the task to the CPU */
	if (WARN_ON(raw_smp_processor_id() != cpu))
		return UCODE_ERROR;

	/* Look for a newer patch in our cache: */
	mc = find_patch(uci);
	if (!mc) {
		/* Fall back to whatever the generic loader staged. */
		mc = uci->mc;
		if (!mc)
			return UCODE_NFOUND;
	}

	/*
	 * Save us the MSR write below - which is a particular expensive
	 * operation - when the other hyperthread has updated the microcode
	 * already.
	 */
	rev = intel_get_microcode_revision();
	if (rev >= mc->hdr.rev) {
		ret = UCODE_OK;
		goto out;
	}

	/*
	 * Writeback and invalidate caches before updating microcode to avoid
	 * internal issues depending on what the microcode is updating.
	 */
	native_wbinvd();

	/* write microcode via MSR 0x79 */
	wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);

	rev = intel_get_microcode_revision();

	/* The revision must now match the patch, else the update failed. */
	if (rev != mc->hdr.rev) {
		pr_err("CPU%d update to revision 0x%x failed\n",
		       cpu, mc->hdr.rev);
		return UCODE_ERROR;
	}

	/* Log once per new revision, from the BSP only. */
	if (bsp && rev != prev_rev) {
		pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
			rev,
			mc->hdr.date & 0xffff,
			mc->hdr.date >> 24,
			(mc->hdr.date >> 16) & 0xff);
		prev_rev = rev;
	}

	ret = UCODE_UPDATED;

out:
	uci->cpu_sig.rev = rev;
	c->microcode	 = rev;

	/* Update boot_cpu_data's revision too, if we're on the BSP: */
	if (bsp)
		boot_cpu_data.microcode = rev;

	return ret;
}

862
/*
 * Parse a stream of concatenated microcode patches from @iter, keep the
 * newest one applicable to @cpu, stage it in uci->mc and cache it for
 * early loading.
 *
 * Returns UCODE_NEW when a newer patch was found, UCODE_OK when the data
 * parsed cleanly but contained nothing newer, UCODE_NFOUND when no patch
 * applied, UCODE_ERROR on malformed input.
 */
static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	unsigned int curr_mc_size = 0, new_mc_size = 0;
	enum ucode_state ret = UCODE_OK;
	int new_rev = uci->cpu_sig.rev;
	u8 *new_mc = NULL, *mc = NULL;
	unsigned int csig, cpf;

	while (iov_iter_count(iter)) {
		struct microcode_header_intel mc_header;
		unsigned int mc_size, data_size;
		u8 *data;

		if (!copy_from_iter_full(&mc_header, sizeof(mc_header), iter)) {
			pr_err("error! Truncated or inaccessible header in microcode data file\n");
			break;
		}

		mc_size = get_totalsize(&mc_header);
		if (mc_size < sizeof(mc_header)) {
			pr_err("error! Bad data in microcode data file (totalsize too small)\n");
			break;
		}
		data_size = mc_size - sizeof(mc_header);
		if (data_size > iov_iter_count(iter)) {
			pr_err("error! Bad data in microcode data file (truncated file?)\n");
			break;
		}

		/* For performance reasons, reuse mc area when possible */
		if (!mc || mc_size > curr_mc_size) {
			vfree(mc);
			mc = vmalloc(mc_size);
			if (!mc)
				break;
			curr_mc_size = mc_size;
		}

		/* Reassemble header + payload, then validate the whole patch. */
		memcpy(mc, &mc_header, sizeof(mc_header));
		data = mc + sizeof(mc_header);
		if (!copy_from_iter_full(data, data_size, iter) ||
		    microcode_sanity_check(mc, 1) < 0) {
			break;
		}

		csig = uci->cpu_sig.sig;
		cpf = uci->cpu_sig.pf;
		if (has_newer_microcode(mc, csig, cpf, new_rev)) {
			/* This patch becomes the new best; hand over the buffer. */
			vfree(new_mc);
			new_rev = mc_header.rev;
			new_mc  = mc;
			new_mc_size = mc_size;
			mc = NULL;	/* trigger new vmalloc */
			ret = UCODE_NEW;
		}
	}

	vfree(mc);

	/* Leftover input means we bailed out of the loop on an error. */
	if (iov_iter_count(iter)) {
		vfree(new_mc);
		return UCODE_ERROR;
	}

	if (!new_mc)
		return UCODE_NFOUND;

	/* Replace any previously staged patch for this CPU. */
	vfree(uci->mc);
	uci->mc = (struct microcode_intel *)new_mc;

	/*
	 * If early loading microcode is supported, save this mc into
	 * permanent memory. So it will be loaded early when a CPU is hot added
	 * or resumes.
	 */
	save_mc_for_early(new_mc, new_mc_size);

	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
		 cpu, new_rev, uci->cpu_sig.rev);

	return ret;
}

946 947 948 949
static bool is_blacklisted(unsigned int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);

950 951
	/*
	 * Late loading on model 79 with microcode revision less than 0x0b000021
952 953 954
	 * and LLC size per core bigger than 2.5MB may result in a system hang.
	 * This behavior is documented in item BDF90, #334165 (Intel Xeon
	 * Processor E7-8800/4800 v4 Product Family).
955 956 957
	 */
	if (c->x86 == 6 &&
	    c->x86_model == INTEL_FAM6_BROADWELL_X &&
958
	    c->x86_stepping == 0x01 &&
959
	    llc_size_per_core > 2621440 &&
960 961 962
	    c->microcode < 0x0b000021) {
		pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
		pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
963 964 965 966 967 968
		return true;
	}

	return false;
}

969 970
/*
 * microcode_ops callback: fetch "intel-ucode/ff-mm-ss" for @cpu via the
 * firmware loader and feed it to generic_load_microcode().
 */
static enum ucode_state request_microcode_fw(int cpu, struct device *device,
					     bool refresh_fw)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	const struct firmware *firmware;
	struct iov_iter iter;
	enum ucode_state ret;
	struct kvec kvec;
	char name[30];

	/* Refuse late loading on parts affected by erratum BDF90. */
	if (is_blacklisted(cpu))
		return UCODE_NFOUND;

	sprintf(name, "intel-ucode/%02x-%02x-%02x",
		c->x86, c->x86_model, c->x86_stepping);

	if (request_firmware_direct(&firmware, name, device)) {
		pr_debug("data file %s load failed\n", name);
		return UCODE_NFOUND;
	}

	/* Wrap the firmware blob in an iov_iter for the generic parser. */
	kvec.iov_base = (void *)firmware->data;
	kvec.iov_len = firmware->size;
	iov_iter_kvec(&iter, WRITE, &kvec, 1, firmware->size);
	ret = generic_load_microcode(cpu, &iter);

	release_firmware(firmware);

	return ret;
}

1000 1001
/*
 * microcode_ops callback: load microcode supplied by userspace at @buf
 * (@size bytes) through the generic parser.
 */
static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
	struct iovec iov = {
		.iov_base = (void __user *)buf,
		.iov_len  = size,
	};
	struct iov_iter iter;

	/* Refuse late loading on parts affected by erratum BDF90. */
	if (is_blacklisted(cpu))
		return UCODE_NFOUND;

	iov_iter_init(&iter, WRITE, &iov, 1, size);

	return generic_load_microcode(cpu, &iter);
}

H
Hannes Eder 已提交
1016
/* Intel implementation of the arch-generic microcode loader callbacks. */
static struct microcode_ops microcode_intel_ops = {
	.request_microcode_user		  = request_microcode_user,
	.request_microcode_fw             = request_microcode_fw,
	.collect_cpu_info                 = collect_cpu_info,
	.apply_microcode                  = apply_microcode_intel,
};

1023 1024
/*
 * Compute the per-core share of the last-level cache in bytes, used by
 * the erratum BDF90 check in is_blacklisted().
 */
static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
{
	/* x86_cache_size is in KB; convert to bytes before dividing. */
	u64 llc_size = c->x86_cache_size * 1024ULL;

	do_div(llc_size, c->x86_max_cores);

	return (int)llc_size;
}

1032
/*
 * Entry point used by the generic microcode driver: verify the boot CPU
 * is a supported Intel part and return the Intel ops table.
 *
 * Returns NULL for unsupported CPUs (pre-family-6 or IA64-capable parts).
 */
struct microcode_ops * __init init_intel_microcode(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
	    cpu_has(c, X86_FEATURE_IA64)) {
		pr_err("Intel CPU family 0x%x not supported\n", c->x86);
		return NULL;
	}

	/* Cached once for the erratum BDF90 blacklist check. */
	llc_size_per_core = calc_llc_size_per_core(c);

	return &microcode_intel_ops;
}