/*
 * Intel CPU Microcode Update Driver for Linux
 *
 * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
 *		 2006 Shaohua Li <shaohua.li@intel.com>
 *
 * Intel CPU microcode early update for Linux
 *
 * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
 *		      H Peter Anvin <hpa@zytor.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/*
 * This needs to be before all headers so that pr_debug in printk.h doesn't turn
 * printk calls into no_printk().
 *
 *#define DEBUG
 */
#define pr_fmt(fmt) "microcode: " fmt

#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/mm.h>

#include <asm/microcode_intel.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>
#include <asm/msr.h>

static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";

/* Current microcode patch used in early patching on the APs. */
static struct microcode_intel *intel_ucode_patch;

/* last level cache size per core */
static int llc_size_per_core;
static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
					unsigned int s2, unsigned int p2)
{
	if (s1 != s2)
		return false;

	/* Processor flags are either both 0 ... */
	if (!p1 && !p2)
		return true;

	/* ... or they intersect. */
	return p1 & p2;
}

/*
 * Returns 1 if update has been found, 0 otherwise.
 */
static int find_matching_signature(void *mc, unsigned int csig, int cpf)
{
	struct microcode_header_intel *mc_hdr = mc;
	struct extended_sigtable *ext_hdr;
	struct extended_signature *ext_sig;
	int i;

	if (cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf))
		return 1;

	/* Look for ext. headers: */
	if (get_totalsize(mc_hdr) <= get_datasize(mc_hdr) + MC_HEADER_SIZE)
		return 0;

	ext_hdr = mc + get_datasize(mc_hdr) + MC_HEADER_SIZE;
	ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE;

	for (i = 0; i < ext_hdr->count; i++) {
		if (cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf))
			return 1;
		ext_sig++;
	}
	return 0;
}

/*
 * Returns 1 if update has been found, 0 otherwise.
 */
static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev)
{
	struct microcode_header_intel *mc_hdr = mc;

	if (mc_hdr->rev <= new_rev)
		return 0;

	return find_matching_signature(mc, csig, cpf);
}

/*
 * Given a CPU signature and a microcode patch, check whether the patch's
 * family and model match the CPU's.
 *
 * %true - if there's a match
 * %false - otherwise
 */
static bool microcode_matches(struct microcode_header_intel *mc_header,
			      unsigned long sig)
{
	unsigned long total_size = get_totalsize(mc_header);
	unsigned long data_size = get_datasize(mc_header);
	struct extended_sigtable *ext_header;
	unsigned int fam_ucode, model_ucode;
	struct extended_signature *ext_sig;
	unsigned int fam, model;
	int ext_sigcount, i;

	fam   = x86_family(sig);
	model = x86_model(sig);

	fam_ucode   = x86_family(mc_header->sig);
	model_ucode = x86_model(mc_header->sig);

	if (fam == fam_ucode && model == model_ucode)
		return true;

	/* Look for ext. headers: */
	if (total_size <= data_size + MC_HEADER_SIZE)
		return false;

	ext_header   = (void *) mc_header + data_size + MC_HEADER_SIZE;
	ext_sig      = (void *)ext_header + EXT_HEADER_SIZE;
	ext_sigcount = ext_header->count;

	for (i = 0; i < ext_sigcount; i++) {
		fam_ucode   = x86_family(ext_sig->sig);
		model_ucode = x86_model(ext_sig->sig);

		if (fam == fam_ucode && model == model_ucode)
			return true;

		ext_sig++;
	}
	return false;
}

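/*
 * Duplicate a raw microcode blob into a freshly allocated ucode_patch
 * container so it can live in the cache list independently of the source
 * buffer.
 */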
static struct ucode_patch *memdup_patch(void *data, unsigned int size)
{
	struct ucode_patch *p;

	p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL);
	if (!p)
		return NULL;

	p->data = kmemdup(data, size, GFP_KERNEL);
	if (!p->data) {
		kfree(p);
		return NULL;
	}

	return p;
}

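/*
 * Cache a microcode patch for later (early) loading: replace any older
 * cached patch matching the same signature, or append a new entry, and
 * point intel_ucode_patch at the cached copy.
 */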
static void save_microcode_patch(void *data, unsigned int size)
{
	struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
	struct ucode_patch *iter, *tmp, *p = NULL;
	bool prev_found = false;
	unsigned int sig, pf;

	mc_hdr = (struct microcode_header_intel *)data;

	list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
		mc_saved_hdr = (struct microcode_header_intel *)iter->data;
		sig	     = mc_saved_hdr->sig;
		pf	     = mc_saved_hdr->pf;

		if (find_matching_signature(data, sig, pf)) {
			prev_found = true;

			if (mc_hdr->rev <= mc_saved_hdr->rev)
				continue;

			p = memdup_patch(data, size);
			if (!p)
				pr_err("Error allocating buffer %p\n", data);
			else {
				list_replace(&iter->plist, &p->plist);
				kfree(iter->data);
				kfree(iter);
			}
		}
	}

	/*
	 * There weren't any previous patches found in the list cache; save the
	 * newly found.
	 */
	if (!prev_found) {
		p = memdup_patch(data, size);
		if (!p)
			pr_err("Error allocating buffer for %p\n", data);
		else
			list_add_tail(&p->plist, &microcode_cache);
	}

	if (!p)
		return;

	/*
	 * Save for early loading. On 32-bit, that needs to be a physical
	 * address as the APs are running from physical addresses, before
	 * paging has been enabled.
	 */
	if (IS_ENABLED(CONFIG_X86_32))
		intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data);
	else
		intel_ucode_patch = p->data;
}

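/*
 * Validate a microcode blob: header/loader versions, sizes, extended
 * signature table layout, and the dword checksums over both the main data
 * and any extended signatures. Returns 0 if the image looks sane.
 */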
static int microcode_sanity_check(void *mc, int print_err)
{
	unsigned long total_size, data_size, ext_table_size;
	struct microcode_header_intel *mc_header = mc;
	struct extended_sigtable *ext_header = NULL;
	u32 sum, orig_sum, ext_sigcount = 0, i;
	struct extended_signature *ext_sig;

	total_size = get_totalsize(mc_header);
	data_size = get_datasize(mc_header);

	if (data_size + MC_HEADER_SIZE > total_size) {
		if (print_err)
			pr_err("Error: bad microcode data file size.\n");
		return -EINVAL;
	}

	if (mc_header->ldrver != 1 || mc_header->hdrver != 1) {
		if (print_err)
			pr_err("Error: invalid/unknown microcode update format.\n");
		return -EINVAL;
	}

	ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
	if (ext_table_size) {
		u32 ext_table_sum = 0;
		u32 *ext_tablep;

		if ((ext_table_size < EXT_HEADER_SIZE)
		 || ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) {
			if (print_err)
				pr_err("Error: truncated extended signature table.\n");
			return -EINVAL;
		}

		ext_header = mc + MC_HEADER_SIZE + data_size;
		if (ext_table_size != exttable_size(ext_header)) {
			if (print_err)
				pr_err("Error: extended signature table size mismatch.\n");
			return -EFAULT;
		}

		ext_sigcount = ext_header->count;

		/*
		 * Check extended table checksum: the sum of all dwords that
		 * comprise a valid table must be 0.
		 */
		ext_tablep = (u32 *)ext_header;

		i = ext_table_size / sizeof(u32);
		while (i--)
			ext_table_sum += ext_tablep[i];

		if (ext_table_sum) {
			if (print_err)
				pr_warn("Bad extended signature table checksum, aborting.\n");
			return -EINVAL;
		}
	}

	/*
	 * Calculate the checksum of update data and header. The checksum of
	 * valid update data and header including the extended signature table
	 * must be 0.
	 */
	orig_sum = 0;
	i = (MC_HEADER_SIZE + data_size) / sizeof(u32);
	while (i--)
		orig_sum += ((u32 *)mc)[i];

	if (orig_sum) {
		if (print_err)
			pr_err("Bad microcode data checksum, aborting.\n");
		return -EINVAL;
	}

	if (!ext_table_size)
		return 0;

	/*
	 * Check extended signature checksum: 0 => valid.
	 */
	for (i = 0; i < ext_sigcount; i++) {
		ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
			  EXT_SIGNATURE_SIZE * i;

		sum = (mc_header->sig + mc_header->pf + mc_header->cksum) -
		      (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
		if (sum) {
			if (print_err)
				pr_err("Bad extended signature checksum, aborting.\n");
			return -EINVAL;
		}
	}
	return 0;
}

/*
 * Get the microcode matching the BSP's model. Only CPUs with the same model
 * as the BSP can stay in the platform.
 */
static struct microcode_intel *
scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save)
{
	struct microcode_header_intel *mc_header;
	struct microcode_intel *patch = NULL;
	unsigned int mc_size;

	while (size) {
		if (size < sizeof(struct microcode_header_intel))
			break;

		mc_header = (struct microcode_header_intel *)data;

		mc_size = get_totalsize(mc_header);
		if (!mc_size ||
		    mc_size > size ||
		    microcode_sanity_check(data, 0) < 0)
			break;

		size -= mc_size;

		if (!microcode_matches(mc_header, uci->cpu_sig.sig)) {
			data += mc_size;
			continue;
		}

		if (save) {
			save_microcode_patch(data, mc_size);
			goto next;
		}

		if (!patch) {
			if (!has_newer_microcode(data,
						 uci->cpu_sig.sig,
						 uci->cpu_sig.pf,
						 uci->cpu_sig.rev))
				goto next;

		} else {
			struct microcode_header_intel *phdr = &patch->hdr;

			if (!has_newer_microcode(data,
						 phdr->sig,
						 phdr->pf,
						 phdr->rev))
				goto next;
		}

		/* We have a newer patch, save it. */
		patch = data;

next:
		data += mc_size;
	}

	if (size)
		return NULL;

	return patch;
}

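/*
 * Collect the CPU signature (CPUID(1).EAX), the platform flags from
 * MSR_IA32_PLATFORM_ID and the current microcode revision. Uses the
 * native_*() accessors so it can run in the early loading paths.
 */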
static int collect_cpu_info_early(struct ucode_cpu_info *uci)
{
	unsigned int val[2];
	unsigned int family, model;
	struct cpu_signature csig = { 0 };
	unsigned int eax, ebx, ecx, edx;

	memset(uci, 0, sizeof(*uci));

	eax = 0x00000001;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	csig.sig = eax;

	family = x86_family(eax);
	model  = x86_model(eax);

	if ((model >= 5) || (family > 6)) {
		/* get processor flags from MSR 0x17 */
		native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
		csig.pf = 1 << ((val[1] >> 18) & 7);
	}

	csig.rev = intel_get_microcode_revision();

	uci->cpu_sig = csig;
	uci->valid = 1;

	return 0;
}

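/*
 * Debug helper: dump the current CPU signature and every patch cached in
 * microcode_cache, including extended signatures. Compiled out unless
 * DEBUG is defined.
 */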
static void show_saved_mc(void)
{
#ifdef DEBUG
	int i = 0, j;
	unsigned int sig, pf, rev, total_size, data_size, date;
	struct ucode_cpu_info uci;
	struct ucode_patch *p;

	if (list_empty(&microcode_cache)) {
		pr_debug("no microcode data saved.\n");
		return;
	}

	collect_cpu_info_early(&uci);

	sig	= uci.cpu_sig.sig;
	pf	= uci.cpu_sig.pf;
	rev	= uci.cpu_sig.rev;
	pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);

	list_for_each_entry(p, &microcode_cache, plist) {
		struct microcode_header_intel *mc_saved_header;
		struct extended_sigtable *ext_header;
		struct extended_signature *ext_sig;
		int ext_sigcount;

		mc_saved_header = (struct microcode_header_intel *)p->data;

		sig	= mc_saved_header->sig;
		pf	= mc_saved_header->pf;
		rev	= mc_saved_header->rev;
		date	= mc_saved_header->date;

		total_size	= get_totalsize(mc_saved_header);
		data_size	= get_datasize(mc_saved_header);

		pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
			 i++, sig, pf, rev, total_size,
			 date & 0xffff,
			 date >> 24,
			 (date >> 16) & 0xff);

		/* Look for ext. headers: */
		if (total_size <= data_size + MC_HEADER_SIZE)
			continue;

		ext_header = (void *)mc_saved_header + data_size + MC_HEADER_SIZE;
		ext_sigcount = ext_header->count;
		ext_sig = (void *)ext_header + EXT_HEADER_SIZE;

		for (j = 0; j < ext_sigcount; j++) {
			sig = ext_sig->sig;
			pf = ext_sig->pf;

			pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n",
				 j, sig, pf);

			ext_sig++;
		}
	}
#endif
}

/*
 * Save this microcode patch. It will be loaded early when a CPU is
 * hot-added or resumes.
 */
static void save_mc_for_early(u8 *mc, unsigned int size)
{
	/* Synchronization during CPU hotplug. */
	static DEFINE_MUTEX(x86_cpu_microcode_mutex);

	mutex_lock(&x86_cpu_microcode_mutex);

	save_microcode_patch(mc, size);
	show_saved_mc();

	mutex_unlock(&x86_cpu_microcode_mutex);
}

static bool load_builtin_intel_microcode(struct cpio_data *cp)
{
	unsigned int eax = 1, ebx, ecx = 0, edx;
	char name[30];

	if (IS_ENABLED(CONFIG_X86_32))
		return false;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	sprintf(name, "intel-ucode/%02x-%02x-%02x",
		      x86_family(eax), x86_model(eax), x86_stepping(eax));

	return get_builtin_firmware(cp, name);
}

/*
 * Print ucode update info.
 */
static void
print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
{
	pr_info_once("microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
		     uci->cpu_sig.rev,
		     date & 0xffff,
		     date >> 24,
		     (date >> 16) & 0xff);
}

#ifdef CONFIG_X86_32

static int delay_ucode_info;
static int current_mc_date;

/*
 * Print early updated ucode info after printk works. This is delayed info dump.
 */
void show_ucode_info_early(void)
{
	struct ucode_cpu_info uci;

	if (delay_ucode_info) {
		collect_cpu_info_early(&uci);
		print_ucode_info(&uci, current_mc_date);
		delay_ucode_info = 0;
	}
}

/*
 * At this point, we cannot call printk() yet. Delay printing microcode info in
 * show_ucode_info_early() until printk() works.
 */
static void print_ucode(struct ucode_cpu_info *uci)
{
	struct microcode_intel *mc;
	int *delay_ucode_info_p;
	int *current_mc_date_p;

	mc = uci->mc;
	if (!mc)
		return;

	delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
	current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);

	*delay_ucode_info_p = 1;
	*current_mc_date_p = mc->hdr.date;
}
#else

static inline void print_ucode(struct ucode_cpu_info *uci)
{
	struct microcode_intel *mc;

	mc = uci->mc;
	if (!mc)
		return;

	print_ucode_info(uci, mc->hdr.date);
}
#endif

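/*
 * Write the patch pointed to by uci->mc into the CPU via MSR_IA32_UCODE_WRITE,
 * unless the running revision is already at least as new. @early selects the
 * deferred "updated early" message path.
 */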
static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
{
	struct microcode_intel *mc;
	u32 rev;

	mc = uci->mc;
	if (!mc)
		return 0;

	/*
	 * Save us the MSR write below - which is a particularly expensive
	 * operation - when the other hyperthread has updated the microcode
	 * already.
	 */
	rev = intel_get_microcode_revision();
	if (rev >= mc->hdr.rev) {
		uci->cpu_sig.rev = rev;
		return UCODE_OK;
	}

	/*
	 * Writeback and invalidate caches before updating microcode to avoid
	 * internal issues depending on what the microcode is updating.
	 */
	native_wbinvd();

	/* write microcode via MSR 0x79 */
	native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);

	rev = intel_get_microcode_revision();
	if (rev != mc->hdr.rev)
		return -1;

	uci->cpu_sig.rev = rev;

	if (early)
		print_ucode(uci);
	else
		print_ucode_info(uci, mc->hdr.date);

	return 0;
}

int __init save_microcode_in_initrd_intel(void)
{
	struct ucode_cpu_info uci;
	struct cpio_data cp;

	/*
	 * initrd is going away, clear patch ptr. We will scan the microcode one
	 * last time before jettisoning and save a patch, if found. Then we will
	 * update that pointer too, with a stable patch address to use when
	 * resuming the cores.
	 */
	intel_ucode_patch = NULL;

	if (!load_builtin_intel_microcode(&cp))
		cp = find_microcode_in_initrd(ucode_path, false);

	if (!(cp.data && cp.size))
		return 0;

	collect_cpu_info_early(&uci);

	scan_microcode(cp.data, cp.size, &uci, true);

	show_saved_mc();

	return 0;
}

/*
 * Returns a pointer to the patch we found, or NULL if none was found.
 */
static struct microcode_intel *__load_ucode_intel(struct ucode_cpu_info *uci)
{
	static const char *path;
	struct cpio_data cp;
	bool use_pa;

	if (IS_ENABLED(CONFIG_X86_32)) {
		path	  = (const char *)__pa_nodebug(ucode_path);
		use_pa	  = true;
	} else {
		path	  = ucode_path;
		use_pa	  = false;
	}

	/* try built-in microcode first */
	if (!load_builtin_intel_microcode(&cp))
		cp = find_microcode_in_initrd(path, use_pa);

	if (!(cp.data && cp.size))
		return NULL;

	collect_cpu_info_early(uci);

	return scan_microcode(cp.data, cp.size, uci, false);
}

void __init load_ucode_intel_bsp(void)
{
	struct microcode_intel *patch;
	struct ucode_cpu_info uci;

	patch = __load_ucode_intel(&uci);
	if (!patch)
		return;

	uci.mc = patch;

	apply_microcode_early(&uci, true);
}

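/*
 * Early loading on an AP: reuse the patch cached by the BSP (via its
 * physical address on 32-bit), falling back to a fresh scan if no patch is
 * cached or the cached one does not apply to this CPU.
 */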
void load_ucode_intel_ap(void)
{
	struct microcode_intel *patch, **iup;
	struct ucode_cpu_info uci;

	if (IS_ENABLED(CONFIG_X86_32))
		iup = (struct microcode_intel **) __pa_nodebug(&intel_ucode_patch);
	else
		iup = &intel_ucode_patch;

reget:
	if (!*iup) {
		patch = __load_ucode_intel(&uci);
		if (!patch)
			return;

		*iup = patch;
	}

	uci.mc = *iup;

	if (apply_microcode_early(&uci, true)) {
		/* Mixed-silicon system? Try to refetch the proper patch: */
		*iup = NULL;

		goto reget;
	}
}

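/*
 * Return a cached patch that matches this CPU's signature and is newer than
 * the revision it is currently running, or NULL.
 */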
static struct microcode_intel *find_patch(struct ucode_cpu_info *uci)
{
	struct microcode_header_intel *phdr;
	struct ucode_patch *iter, *tmp;

	list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {

		phdr = (struct microcode_header_intel *)iter->data;

		if (phdr->rev <= uci->cpu_sig.rev)
			continue;

		if (!find_matching_signature(phdr,
					     uci->cpu_sig.sig,
					     uci->cpu_sig.pf))
			continue;

		return iter->data;
	}
	return NULL;
}

void reload_ucode_intel(void)
{
	struct microcode_intel *p;
	struct ucode_cpu_info uci;

	collect_cpu_info_early(&uci);

	p = find_patch(&uci);
	if (!p)
		return;

	uci.mc = p;

	apply_microcode_early(&uci, false);
}

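/* Driver callback: fill in @csig for @cpu_num from its cpuinfo_x86 data. */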
static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
{
	static struct cpu_signature prev;
	struct cpuinfo_x86 *c = &cpu_data(cpu_num);
	unsigned int val[2];

	memset(csig, 0, sizeof(*csig));

	csig->sig = cpuid_eax(0x00000001);

	if ((c->x86_model >= 5) || (c->x86 > 6)) {
		/* get processor flags from MSR 0x17 */
		rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
		csig->pf = 1 << ((val[1] >> 18) & 7);
	}

	csig->rev = c->microcode;

	/* No extra locking on prev, races are harmless. */
	if (csig->sig != prev.sig || csig->pf != prev.pf || csig->rev != prev.rev) {
		pr_info("sig=0x%x, pf=0x%x, revision=0x%x\n",
			csig->sig, csig->pf, csig->rev);
		prev = *csig;
	}

	return 0;
}

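/*
 * Late-load path: apply the best cached patch (or uci->mc) to @cpu, which
 * must be the CPU this task is currently running on.
 */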
static enum ucode_state apply_microcode_intel(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct microcode_intel *mc;
	static int prev_rev;
	u32 rev;

	/* We should bind the task to the CPU */
	if (WARN_ON(raw_smp_processor_id() != cpu))
		return UCODE_ERROR;

	/* Look for a newer patch in our cache: */
	mc = find_patch(uci);
	if (!mc) {
		mc = uci->mc;
		if (!mc)
			return UCODE_NFOUND;
	}

	/*
	 * Save us the MSR write below - which is a particularly expensive
	 * operation - when the other hyperthread has updated the microcode
	 * already.
	 */
	rev = intel_get_microcode_revision();
	if (rev >= mc->hdr.rev) {
		uci->cpu_sig.rev = rev;
		c->microcode = rev;
		return UCODE_OK;
	}

	/*
	 * Writeback and invalidate caches before updating microcode to avoid
	 * internal issues depending on what the microcode is updating.
	 */
	native_wbinvd();

	/* write microcode via MSR 0x79 */
	wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);

	rev = intel_get_microcode_revision();

	if (rev != mc->hdr.rev) {
		pr_err("CPU%d update to revision 0x%x failed\n",
		       cpu, mc->hdr.rev);
		return UCODE_ERROR;
	}

	if (rev != prev_rev) {
		pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
			rev,
			mc->hdr.date & 0xffff,
			mc->hdr.date >> 24,
			(mc->hdr.date >> 16) & 0xff);
		prev_rev = rev;
	}

	uci->cpu_sig.rev = rev;
	c->microcode = rev;

	/* Update boot_cpu_data's revision too, if we're on the BSP: */
	if (c->cpu_index == boot_cpu_data.cpu_index)
		boot_cpu_data.microcode = rev;

	return UCODE_UPDATED;
}

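/*
 * Walk a blob of concatenated microcode updates, copy each candidate in via
 * @get_ucode_data, sanity-check it, and keep the newest one matching this
 * CPU. The winner replaces uci->mc and is also cached for early loading.
 */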
static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
				int (*get_ucode_data)(void *, const void *, size_t))
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	u8 *ucode_ptr = data, *new_mc = NULL, *mc = NULL;
	int new_rev = uci->cpu_sig.rev;
	unsigned int leftover = size;
	unsigned int curr_mc_size = 0, new_mc_size = 0;
	unsigned int csig, cpf;
	enum ucode_state ret = UCODE_OK;

	while (leftover) {
		struct microcode_header_intel mc_header;
		unsigned int mc_size;

		if (leftover < sizeof(mc_header)) {
			pr_err("error! Truncated header in microcode data file\n");
			break;
		}

		if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header)))
			break;

		mc_size = get_totalsize(&mc_header);
		if (!mc_size || mc_size > leftover) {
			pr_err("error! Bad data in microcode data file\n");
			break;
		}

		/* For performance reasons, reuse mc area when possible */
		if (!mc || mc_size > curr_mc_size) {
			vfree(mc);
			mc = vmalloc(mc_size);
			if (!mc)
				break;
			curr_mc_size = mc_size;
		}

		if (get_ucode_data(mc, ucode_ptr, mc_size) ||
		    microcode_sanity_check(mc, 1) < 0) {
			break;
		}

		csig = uci->cpu_sig.sig;
		cpf = uci->cpu_sig.pf;
		if (has_newer_microcode(mc, csig, cpf, new_rev)) {
			vfree(new_mc);
			new_rev = mc_header.rev;
			new_mc  = mc;
			new_mc_size = mc_size;
			mc = NULL;	/* trigger new vmalloc */
			ret = UCODE_NEW;
		}

		ucode_ptr += mc_size;
		leftover  -= mc_size;
	}

	vfree(mc);

	if (leftover) {
		vfree(new_mc);
		return UCODE_ERROR;
	}

	if (!new_mc)
		return UCODE_NFOUND;

	vfree(uci->mc);
	uci->mc = (struct microcode_intel *)new_mc;

	/*
	 * If early loading microcode is supported, save this mc into
	 * permanent memory. So it will be loaded early when a CPU is hot added
	 * or resumes.
	 */
	save_mc_for_early(new_mc, new_mc_size);

	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
		 cpu, new_rev, uci->cpu_sig.rev);

	return ret;
}

static int get_ucode_fw(void *to, const void *from, size_t n)
{
	memcpy(to, from, n);
	return 0;
}

static bool is_blacklisted(unsigned int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	/*
	 * Late loading on model 79 with microcode revision less than 0x0b000021
	 * and LLC size per core bigger than 2.5MB may result in a system hang.
	 * This behavior is documented in item BDF90, #334165 (Intel Xeon
	 * Processor E7-8800/4800 v4 Product Family).
	 */
	if (c->x86 == 6 &&
	    c->x86_model == INTEL_FAM6_BROADWELL_X &&
	    c->x86_stepping == 0x01 &&
	    llc_size_per_core > 2621440 &&
	    c->microcode < 0x0b000021) {
		pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
		pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
		return true;
	}

	return false;
}

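/*
 * Late-load entry point: fetch intel-ucode/<family>-<model>-<stepping> via
 * request_firmware_direct() and feed it to generic_load_microcode().
 */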
static enum ucode_state request_microcode_fw(int cpu, struct device *device,
					     bool refresh_fw)
{
	char name[30];
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	const struct firmware *firmware;
	enum ucode_state ret;

	if (is_blacklisted(cpu))
		return UCODE_NFOUND;

	sprintf(name, "intel-ucode/%02x-%02x-%02x",
		c->x86, c->x86_model, c->x86_stepping);

	if (request_firmware_direct(&firmware, name, device)) {
		pr_debug("data file %s load failed\n", name);
		return UCODE_NFOUND;
	}

	ret = generic_load_microcode(cpu, (void *)firmware->data,
				     firmware->size, &get_ucode_fw);

	release_firmware(firmware);

	return ret;
}

static int get_ucode_user(void *to, const void *from, size_t n)
{
	return copy_from_user(to, from, n);
}

static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
	if (is_blacklisted(cpu))
		return UCODE_NFOUND;

	return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
}

static struct microcode_ops microcode_intel_ops = {
	.request_microcode_user		  = request_microcode_user,
	.request_microcode_fw             = request_microcode_fw,
	.collect_cpu_info                 = collect_cpu_info,
	.apply_microcode                  = apply_microcode_intel,
};

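/* LLC size divided by the number of cores sharing it, in bytes. */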
static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
{
	u64 llc_size = c->x86_cache_size * 1024ULL;

	do_div(llc_size, c->x86_max_cores);

	return (int)llc_size;
}

struct microcode_ops * __init init_intel_microcode(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
	    cpu_has(c, X86_FEATURE_IA64)) {
		pr_err("Intel CPU family 0x%x not supported\n", c->x86);
		return NULL;
	}

	llc_size_per_core = calc_llc_size_per_core(c);

	return &microcode_intel_ops;
}