intel.c 21.9 KB
Newer Older
L
Linus Torvalds 已提交
1
/*
2
 * Intel CPU Microcode Update Driver for Linux
L
Linus Torvalds 已提交
3
 *
A
Andrew Morton 已提交
4
 * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
5
 *		 2006 Shaohua Li <shaohua.li@intel.com>
L
Linus Torvalds 已提交
6
 *
7 8 9 10 11
 * Intel CPU microcode early update for Linux
 *
 * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
 *		      H Peter Anvin" <hpa@zytor.com>
 *
12 13 14 15
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
L
Linus Torvalds 已提交
16
 */
17

18 19 20 21 22 23
/*
 * This needs to be before all headers so that pr_debug in printk.h doesn't turn
 * printk calls into no_printk().
 *
 *#define DEBUG
 */
24
#define pr_fmt(fmt) "microcode: " fmt
25

26
#include <linux/earlycpio.h>
I
Ingo Molnar 已提交
27 28
#include <linux/firmware.h>
#include <linux/uaccess.h>
29 30
#include <linux/vmalloc.h>
#include <linux/initrd.h>
I
Ingo Molnar 已提交
31
#include <linux/kernel.h>
32 33 34
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/mm.h>
L
Linus Torvalds 已提交
35

36
#include <asm/microcode_intel.h>
37
#include <asm/intel-family.h>
I
Ingo Molnar 已提交
38
#include <asm/processor.h>
39 40
#include <asm/tlbflush.h>
#include <asm/setup.h>
I
Ingo Molnar 已提交
41
#include <asm/msr.h>
L
Linus Torvalds 已提交
42

43
static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
44

45
/* Current microcode patch used in early patching on the APs. */
46
static struct microcode_intel *intel_ucode_patch;
47

48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102
/*
 * Check whether a CPU (signature, platform flags) pair accepts a microcode
 * (signature, platform flags) pair.  The raw signatures must be identical;
 * the platform-flag masks match either when both are zero or when they
 * share at least one set bit.
 */
static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
					unsigned int s2, unsigned int p2)
{
	if (s1 != s2)
		return false;

	return (!p1 && !p2) || (p1 & p2);
}

/*
 * Check whether microcode blob @mc is applicable to a CPU with signature
 * @csig and platform flags @cpf, consulting both the main header and any
 * extended signature table appended after the data section.
 *
 * Returns 1 if update has been found, 0 otherwise.
 */
static int find_matching_signature(void *mc, unsigned int csig, int cpf)
{
	struct microcode_header_intel *mc_hdr = mc;
	struct extended_sigtable *ext_hdr;
	struct extended_signature *ext_sig;
	int i;

	if (cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf))
		return 1;

	/* Look for ext. headers: total size exceeding header+data means one is present. */
	if (get_totalsize(mc_hdr) <= get_datasize(mc_hdr) + MC_HEADER_SIZE)
		return 0;

	/* Extended signature table sits right after the data section. */
	ext_hdr = mc + get_datasize(mc_hdr) + MC_HEADER_SIZE;
	ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE;

	for (i = 0; i < ext_hdr->count; i++) {
		if (cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf))
			return 1;
		ext_sig++;
	}
	return 0;
}

/*
 * Check whether microcode blob @mc both matches the CPU (@csig/@cpf) and
 * carries a revision strictly newer than @new_rev.
 *
 * Returns 1 if update has been found, 0 otherwise.
 */
static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev)
{
	struct microcode_header_intel *mc_hdr = mc;

	/* Not an upgrade over what we already have/selected. */
	if (mc_hdr->rev <= new_rev)
		return 0;

	return find_matching_signature(mc, csig, cpf);
}

103 104 105
/*
 * Given CPU signature and a microcode patch, this function finds if the
 * microcode patch has matching family and model with the CPU.
 *
 * Unlike find_matching_signature(), this only compares family/model (not
 * stepping or platform flags), again checking the extended signature table
 * when present.
 *
 * %true - if there's a match
 * %false - otherwise
 */
static bool microcode_matches(struct microcode_header_intel *mc_header,
			      unsigned long sig)
{
	unsigned long total_size = get_totalsize(mc_header);
	unsigned long data_size = get_datasize(mc_header);
	struct extended_sigtable *ext_header;
	unsigned int fam_ucode, model_ucode;
	struct extended_signature *ext_sig;
	unsigned int fam, model;
	int ext_sigcount, i;

	fam   = x86_family(sig);
	model = x86_model(sig);

	fam_ucode   = x86_family(mc_header->sig);
	model_ucode = x86_model(mc_header->sig);

	if (fam == fam_ucode && model == model_ucode)
		return true;

	/* Look for ext. headers: */
	if (total_size <= data_size + MC_HEADER_SIZE)
		return false;

	ext_header   = (void *) mc_header + data_size + MC_HEADER_SIZE;
	ext_sig      = (void *)ext_header + EXT_HEADER_SIZE;
	ext_sigcount = ext_header->count;

	for (i = 0; i < ext_sigcount; i++) {
		fam_ucode   = x86_family(ext_sig->sig);
		model_ucode = x86_model(ext_sig->sig);

		if (fam == fam_ucode && model == model_ucode)
			return true;

		ext_sig++;
	}
	return false;
}

150
/*
 * Allocate a struct ucode_patch and a private copy of the @size bytes at
 * @data.  Returns the new patch, or NULL on allocation failure (caller
 * owns the result and its ->data buffer).
 */
static struct ucode_patch *memdup_patch(void *data, unsigned int size)
{
	struct ucode_patch *p;

	p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL);
	if (!p)
		return NULL;

	p->data = kmemdup(data, size, GFP_KERNEL);
	if (!p->data) {
		kfree(p);
		return NULL;
	}

	return p;
}

167
/*
 * Insert the microcode blob at @data (@size bytes) into the global
 * microcode_cache list: replace any matching cached patch with a lower
 * revision, or append it if no matching patch exists.  Also refreshes the
 * early-loading pointer intel_ucode_patch.
 */
static void save_microcode_patch(void *data, unsigned int size)
{
	struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
	struct ucode_patch *iter, *tmp, *p = NULL;
	bool prev_found = false;
	unsigned int sig, pf;

	mc_hdr = (struct microcode_header_intel *)data;

	list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
		mc_saved_hdr = (struct microcode_header_intel *)iter->data;
		sig	     = mc_saved_hdr->sig;
		pf	     = mc_saved_hdr->pf;

		if (find_matching_signature(data, sig, pf)) {
			prev_found = true;

			/* Cached entry is already same or newer - keep it. */
			if (mc_hdr->rev <= mc_saved_hdr->rev)
				continue;

			p = memdup_patch(data, size);
			if (!p)
				pr_err("Error allocating buffer %p\n", data);
			else
				list_replace(&iter->plist, &p->plist);
		}
	}

	/*
	 * There weren't any previous patches found in the list cache; save the
	 * newly found.
	 */
	if (!prev_found) {
		p = memdup_patch(data, size);
		if (!p)
			pr_err("Error allocating buffer for %p\n", data);
		else
			list_add_tail(&p->plist, &microcode_cache);
	}

	/* Nothing was inserted/replaced - early pointer stays as-is. */
	if (!p)
		return;

	/*
	 * Save for early loading. On 32-bit, that needs to be a physical
	 * address as the APs are running from physical addresses, before
	 * paging has been enabled.
	 */
	if (IS_ENABLED(CONFIG_X86_32))
		intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data);
	else
		intel_ucode_patch = p->data;
}

221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318
/*
 * Validate the structure and checksums of the microcode blob at @mc:
 * header sizes, loader/header version, extended-table geometry, the
 * whole-table and per-extended-signature checksums.  @print_err selects
 * whether failures are logged.  Returns 0 if valid, negative errno-style
 * value otherwise.
 */
static int microcode_sanity_check(void *mc, int print_err)
{
	unsigned long total_size, data_size, ext_table_size;
	struct microcode_header_intel *mc_header = mc;
	struct extended_sigtable *ext_header = NULL;
	u32 sum, orig_sum, ext_sigcount = 0, i;
	struct extended_signature *ext_sig;

	total_size = get_totalsize(mc_header);
	data_size = get_datasize(mc_header);

	if (data_size + MC_HEADER_SIZE > total_size) {
		if (print_err)
			pr_err("Error: bad microcode data file size.\n");
		return -EINVAL;
	}

	if (mc_header->ldrver != 1 || mc_header->hdrver != 1) {
		if (print_err)
			pr_err("Error: invalid/unknown microcode update format.\n");
		return -EINVAL;
	}

	ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
	if (ext_table_size) {
		u32 ext_table_sum = 0;
		u32 *ext_tablep;

		if ((ext_table_size < EXT_HEADER_SIZE)
		 || ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) {
			if (print_err)
				pr_err("Error: truncated extended signature table.\n");
			return -EINVAL;
		}

		ext_header = mc + MC_HEADER_SIZE + data_size;
		if (ext_table_size != exttable_size(ext_header)) {
			if (print_err)
				pr_err("Error: extended signature table size mismatch.\n");
			return -EFAULT;
		}

		ext_sigcount = ext_header->count;

		/*
		 * Check extended table checksum: the sum of all dwords that
		 * comprise a valid table must be 0.
		 */
		ext_tablep = (u32 *)ext_header;

		i = ext_table_size / sizeof(u32);
		while (i--)
			ext_table_sum += ext_tablep[i];

		if (ext_table_sum) {
			if (print_err)
				pr_warn("Bad extended signature table checksum, aborting.\n");
			return -EINVAL;
		}
	}

	/*
	 * Calculate the checksum of update data and header. The checksum of
	 * valid update data and header including the extended signature table
	 * must be 0.
	 */
	orig_sum = 0;
	i = (MC_HEADER_SIZE + data_size) / sizeof(u32);
	while (i--)
		orig_sum += ((u32 *)mc)[i];

	if (orig_sum) {
		if (print_err)
			pr_err("Bad microcode data checksum, aborting.\n");
		return -EINVAL;
	}

	if (!ext_table_size)
		return 0;

	/*
	 * Check extended signature checksum: 0 => valid.
	 */
	for (i = 0; i < ext_sigcount; i++) {
		ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
			  EXT_SIGNATURE_SIZE * i;

		sum = (mc_header->sig + mc_header->pf + mc_header->cksum) -
		      (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
		if (sum) {
			if (print_err)
				pr_err("Bad extended signature checksum, aborting.\n");
			return -EINVAL;
		}
	}
	return 0;
}

319 320 321 322
/*
 * Get microcode matching with BSP's model. Only CPUs with the same model as
 * BSP can stay in the platform.
 *
 * Walk the concatenated microcode blobs in [@data, @data + @size).  When
 * @save is true, every matching blob is stashed in the cache via
 * save_microcode_patch() and NULL is effectively returned; otherwise the
 * newest matching patch (relative to @uci's current revision) is returned.
 * Returns NULL if the buffer is malformed (bytes left over) or nothing
 * matched.
 */
static struct microcode_intel *
scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save)
{
	struct microcode_header_intel *mc_header;
	struct microcode_intel *patch = NULL;
	unsigned int mc_size;

	while (size) {
		if (size < sizeof(struct microcode_header_intel))
			break;

		mc_header = (struct microcode_header_intel *)data;

		mc_size = get_totalsize(mc_header);
		if (!mc_size ||
		    mc_size > size ||
		    microcode_sanity_check(data, 0) < 0)
			break;

		size -= mc_size;

		if (!microcode_matches(mc_header, uci->cpu_sig.sig)) {
			data += mc_size;
			continue;
		}

		if (save) {
			save_microcode_patch(data, mc_size);
			goto next;
		}


		if (!patch) {
			/* First candidate: must beat the CPU's running revision. */
			if (!has_newer_microcode(data,
						 uci->cpu_sig.sig,
						 uci->cpu_sig.pf,
						 uci->cpu_sig.rev))
				goto next;

		} else {
			/* Subsequent candidates: must beat the best patch so far. */
			struct microcode_header_intel *phdr = &patch->hdr;

			if (!has_newer_microcode(data,
						 phdr->sig,
						 phdr->pf,
						 phdr->rev))
				goto next;
		}

		/* We have a newer patch, save it. */
		patch = data;

next:
		data += mc_size;
	}

	/* Leftover bytes mean a truncated/corrupt container - reject all. */
	if (size)
		return NULL;

	return patch;
}

/*
 * Fill @uci with this CPU's signature (CPUID.1 EAX), platform flags
 * (derived from MSR_IA32_PLATFORM_ID bits 52:50) and current microcode
 * revision.  Uses native_* accessors so it is safe in the early boot /
 * pre-paravirt path.  Always returns 0.
 */
static int collect_cpu_info_early(struct ucode_cpu_info *uci)
{
	unsigned int val[2];
	unsigned int family, model;
	struct cpu_signature csig = { 0 };
	unsigned int eax, ebx, ecx, edx;

	memset(uci, 0, sizeof(*uci));

	eax = 0x00000001;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	csig.sig = eax;

	family = x86_family(eax);
	model  = x86_model(eax);

	/* Platform flags only exist on family 6 model >= 5, or family > 6. */
	if ((model >= 5) || (family > 6)) {
		/* get processor flags from MSR 0x17 */
		native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
		csig.pf = 1 << ((val[1] >> 18) & 7);
	}

	csig.rev = intel_get_microcode_revision();

	uci->cpu_sig = csig;
	uci->valid = 1;

	return 0;
}

/*
 * Debug-only dump of the cached microcode patches: prints the current
 * CPU signature and, for each entry in microcode_cache, its signature,
 * platform flags, revision, size, date, and any extended signatures.
 * Compiles to an empty function unless DEBUG is defined.
 */
static void show_saved_mc(void)
{
#ifdef DEBUG
	int i = 0, j;
	unsigned int sig, pf, rev, total_size, data_size, date;
	struct ucode_cpu_info uci;
	struct ucode_patch *p;

	if (list_empty(&microcode_cache)) {
		pr_debug("no microcode data saved.\n");
		return;
	}

	collect_cpu_info_early(&uci);

	sig	= uci.cpu_sig.sig;
	pf	= uci.cpu_sig.pf;
	rev	= uci.cpu_sig.rev;
	pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);

	list_for_each_entry(p, &microcode_cache, plist) {
		struct microcode_header_intel *mc_saved_header;
		struct extended_sigtable *ext_header;
		struct extended_signature *ext_sig;
		int ext_sigcount;

		mc_saved_header = (struct microcode_header_intel *)p->data;

		sig	= mc_saved_header->sig;
		pf	= mc_saved_header->pf;
		rev	= mc_saved_header->rev;
		date	= mc_saved_header->date;

		total_size	= get_totalsize(mc_saved_header);
		data_size	= get_datasize(mc_saved_header);

		/* date is printed as year (low word) - month (top byte) - day (bits 23:16) */
		pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
			 i++, sig, pf, rev, total_size,
			 date & 0xffff,
			 date >> 24,
			 (date >> 16) & 0xff);

		/* Look for ext. headers: */
		if (total_size <= data_size + MC_HEADER_SIZE)
			continue;

		ext_header = (void *)mc_saved_header + data_size + MC_HEADER_SIZE;
		ext_sigcount = ext_header->count;
		ext_sig = (void *)ext_header + EXT_HEADER_SIZE;

		for (j = 0; j < ext_sigcount; j++) {
			sig = ext_sig->sig;
			pf = ext_sig->pf;

			pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n",
				 j, sig, pf);

			ext_sig++;
		}
	}
#endif
}
478 479

/*
 * Save this microcode patch. It will be loaded early when a CPU is
 * hot-added or resumes.
 *
 * Serialized against concurrent late-load/hotplug callers by a local
 * mutex; a no-op when CONFIG_HOTPLUG_CPU is disabled.
 */
static void save_mc_for_early(u8 *mc, unsigned int size)
{
#ifdef CONFIG_HOTPLUG_CPU
	/* Synchronization during CPU hotplug. */
	static DEFINE_MUTEX(x86_cpu_microcode_mutex);

	mutex_lock(&x86_cpu_microcode_mutex);

	save_microcode_patch(mc, size);
	show_saved_mc();

	mutex_unlock(&x86_cpu_microcode_mutex);
#endif
}
497

498
/*
 * Look up kernel-built-in microcode for this CPU, named after its
 * family-model-stepping ("intel-ucode/FF-MM-SS").  Fills @cp and returns
 * true on success.  Built-in firmware is 64-bit only.
 */
static bool load_builtin_intel_microcode(struct cpio_data *cp)
{
	unsigned int eax = 1, ebx, ecx = 0, edx;
	char name[30];

	if (IS_ENABLED(CONFIG_X86_32))
		return false;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	sprintf(name, "intel-ucode/%02x-%02x-%02x",
		      x86_family(eax), x86_model(eax), x86_stepping(eax));

	return get_builtin_firmware(cp, name);
}

/*
 * Print ucode update info.
 *
 * @date is the packed header date: year in the low 16 bits, month in the
 * top byte, day in bits 23:16 (printed as hex, i.e. BCD-style fields).
 * Logged only once per boot via pr_info_once().
 */
static void
print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
{
	pr_info_once("microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
		     uci->cpu_sig.rev,
		     date & 0xffff,
		     date >> 24,
		     (date >> 16) & 0xff);
}

#ifdef CONFIG_X86_32

/* Set when an early update happened before printk was usable. */
static int delay_ucode_info;
/* Packed date of the early-applied patch, for the delayed printout. */
static int current_mc_date;

/*
 * Print early updated ucode info after printk works. This is delayed info dump.
 */
void show_ucode_info_early(void)
{
	struct ucode_cpu_info uci;

	if (delay_ucode_info) {
		collect_cpu_info_early(&uci);
		print_ucode_info(&uci, current_mc_date);
		delay_ucode_info = 0;
	}
}

/*
 * At this point, we can not call printk() yet. Delay printing microcode info in
 * show_ucode_info_early() until printk() works.
 *
 * Runs before paging is enabled on 32-bit, so the static flags must be
 * written through their physical addresses (__pa_nodebug).
 */
static void print_ucode(struct ucode_cpu_info *uci)
{
	struct microcode_intel *mc;
	int *delay_ucode_info_p;
	int *current_mc_date_p;

	mc = uci->mc;
	if (!mc)
		return;

	delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
	current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);

	*delay_ucode_info_p = 1;
	*current_mc_date_p = mc->hdr.date;
}
#else

/* 64-bit runs with paging up early, so the info can be printed directly. */
static inline void print_ucode(struct ucode_cpu_info *uci)
{
	struct microcode_intel *mc;

	mc = uci->mc;
	if (!mc)
		return;

	print_ucode_info(uci, mc->hdr.date);
}
#endif

/*
 * Apply the patch in @uci->mc to the current CPU via the microcode update
 * MSR and verify the revision took effect.  @early selects the pre-printk
 * reporting path.  Returns 0 on success (or no patch), -1 if the CPU did
 * not accept the update.
 */
static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
{
	struct microcode_intel *mc;
	u32 rev;

	mc = uci->mc;
	if (!mc)
		return 0;

	/* write microcode via MSR 0x79 */
	native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);

	rev = intel_get_microcode_revision();
	if (rev != mc->hdr.rev)
		return -1;

	uci->cpu_sig.rev = rev;

	if (early)
		print_ucode(uci);
	else
		print_ucode_info(uci, mc->hdr.date);

	return 0;
}

/*
 * Called before the initrd is jettisoned: rescan built-in/initrd microcode
 * one last time and cache any matching patch in permanent memory so APs,
 * hotplugged CPUs, and resume can reuse it.  Always returns 0.
 */
int __init save_microcode_in_initrd_intel(void)
{
	struct ucode_cpu_info uci;
	struct cpio_data cp;

	/*
	 * initrd is going away, clear patch ptr. We will scan the microcode one
	 * last time before jettisoning and save a patch, if found. Then we will
	 * update that pointer too, with a stable patch address to use when
	 * resuming the cores.
	 */
	intel_ucode_patch = NULL;

	if (!load_builtin_intel_microcode(&cp))
		cp = find_microcode_in_initrd(ucode_path, false);

	if (!(cp.data && cp.size))
		return 0;

	collect_cpu_info_early(&uci);

	/* save=true: cache matching patches (also refreshes intel_ucode_patch). */
	scan_microcode(cp.data, cp.size, &uci, true);

	show_saved_mc();

	return 0;
}
633

634 635 636 637 638 639 640 641
/*
 * Locate microcode for the current CPU from built-in firmware or the
 * initrd, fill @uci with the CPU's signature, and return a pointer to the
 * best matching patch (NULL if none).  On 32-bit this runs before paging,
 * so the path string must be referenced by physical address.
 */
static struct microcode_intel *__load_ucode_intel(struct ucode_cpu_info *uci)
{
	static const char *path;
	struct cpio_data cp;
	bool use_pa;

	if (IS_ENABLED(CONFIG_X86_32)) {
		path	  = (const char *)__pa_nodebug(ucode_path);
		use_pa	  = true;
	} else {
		path	  = ucode_path;
		use_pa	  = false;
	}

	/* try built-in microcode first */
	if (!load_builtin_intel_microcode(&cp))
		cp = find_microcode_in_initrd(path, use_pa);

	if (!(cp.data && cp.size))
		return NULL;

	collect_cpu_info_early(uci);

	return scan_microcode(cp.data, cp.size, uci, false);
}

663
/*
 * Early microcode load on the boot CPU: find the best available patch
 * and apply it immediately.
 */
void __init load_ucode_intel_bsp(void)
{
	struct microcode_intel *patch;
	struct ucode_cpu_info uci;

	patch = __load_ucode_intel(&uci);
	if (!patch)
		return;

	uci.mc = patch;

	apply_microcode_early(&uci, true);
}

677
/*
 * Early microcode load on an application processor.  Reuses the patch
 * cached by the BSP (through a physical-address alias on 32-bit, where
 * paging is not yet enabled on the AP); if applying it fails - e.g. on a
 * mixed-silicon system - the cache is invalidated and rescanned once.
 */
void load_ucode_intel_ap(void)
{
	struct microcode_intel *patch, **iup;
	struct ucode_cpu_info uci;

	if (IS_ENABLED(CONFIG_X86_32))
		iup = (struct microcode_intel **) __pa_nodebug(&intel_ucode_patch);
	else
		iup = &intel_ucode_patch;

reget:
	if (!*iup) {
		patch = __load_ucode_intel(&uci);
		if (!patch)
			return;

		*iup = patch;
	}

	uci.mc = *iup;

	if (apply_microcode_early(&uci, true)) {
		/* Mixed-silicon system? Try to refetch the proper patch: */
		*iup = NULL;

		goto reget;
	}
}

706
/*
 * Search the microcode_cache for a patch that matches @uci's signature and
 * platform flags and is strictly newer than its current revision.  Returns
 * the cached patch data, or NULL if nothing suitable is cached.
 */
static struct microcode_intel *find_patch(struct ucode_cpu_info *uci)
{
	struct microcode_header_intel *phdr;
	struct ucode_patch *iter, *tmp;

	list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {

		phdr = (struct microcode_header_intel *)iter->data;

		/* Not an upgrade over the running revision. */
		if (phdr->rev <= uci->cpu_sig.rev)
			continue;

		if (!find_matching_signature(phdr,
					     uci->cpu_sig.sig,
					     uci->cpu_sig.pf))
			continue;

		return iter->data;
	}
	return NULL;
}

/*
 * Re-apply cached microcode to the current CPU, e.g. after resume, using
 * whatever newer patch the cache holds for this CPU's signature.
 */
void reload_ucode_intel(void)
{
	struct microcode_intel *p;
	struct ucode_cpu_info uci;

	collect_cpu_info_early(&uci);

	p = find_patch(&uci);
	if (!p)
		return;

	uci.mc = p;

	apply_microcode_early(&uci, false);
}

744
/*
 * microcode_ops callback: fill @csig with CPU @cpu_num's signature,
 * platform flags and current microcode revision (taken from cpu_data, not
 * re-read from the MSR).  Logs the triple only when it changes from the
 * previously reported one.  Always returns 0.
 */
static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
{
	static struct cpu_signature prev;
	struct cpuinfo_x86 *c = &cpu_data(cpu_num);
	unsigned int val[2];

	memset(csig, 0, sizeof(*csig));

	csig->sig = cpuid_eax(0x00000001);

	/* Platform flags only exist on family 6 model >= 5, or family > 6. */
	if ((c->x86_model >= 5) || (c->x86 > 6)) {
		/* get processor flags from MSR 0x17 */
		rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
		csig->pf = 1 << ((val[1] >> 18) & 7);
	}

	csig->rev = c->microcode;

	/* No extra locking on prev, races are harmless. */
	if (csig->sig != prev.sig || csig->pf != prev.pf || csig->rev != prev.rev) {
		pr_info("sig=0x%x, pf=0x%x, revision=0x%x\n",
			csig->sig, csig->pf, csig->rev);
		prev = *csig;
	}

	return 0;
}

772
/*
 * microcode_ops callback: apply the per-CPU loaded patch (or a newer one
 * from the cache) to @cpu, which must be the CPU we are running on.
 * Verifies the revision after the MSR write and updates the bookkeeping
 * in ucode_cpu_info/cpu_data.  Returns 0 on success or when no patch is
 * available, -1 on failure.
 */
static int apply_microcode_intel(int cpu)
{
	struct microcode_intel *mc;
	struct ucode_cpu_info *uci;
	struct cpuinfo_x86 *c;
	static int prev_rev;
	u32 rev;

	/* We should bind the task to the CPU */
	if (WARN_ON(raw_smp_processor_id() != cpu))
		return -1;

	uci = ucode_cpu_info + cpu;
	mc = uci->mc;
	if (!mc) {
		/* Look for a newer patch in our cache: */
		mc = find_patch(uci);
		if (!mc)
			return 0;
	}


	/* write microcode via MSR 0x79 */
	wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);

	rev = intel_get_microcode_revision();

	if (rev != mc->hdr.rev) {
		pr_err("CPU%d update to revision 0x%x failed\n",
		       cpu, mc->hdr.rev);
		return -1;
	}

	/* Report each new revision once, not once per CPU. */
	if (rev != prev_rev) {
		pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
			rev,
			mc->hdr.date & 0xffff,
			mc->hdr.date >> 24,
			(mc->hdr.date >> 16) & 0xff);
		prev_rev = rev;
	}

	c = &cpu_data(cpu);

	uci->cpu_sig.rev = rev;
	c->microcode = rev;

	return 0;
}

821 822
/*
 * Parse a user- or firmware-supplied container of concatenated microcode
 * blobs for @cpu, copying each via @get_ucode_data (which abstracts
 * kernel-space vs. user-space sources), sanity-checking it, and keeping
 * the newest one matching the CPU.  On success the winner is installed in
 * uci->mc (previous buffer freed) and also cached for early loading.
 *
 * Returns UCODE_OK, UCODE_NFOUND (nothing applicable), or UCODE_ERROR
 * (malformed input).
 *
 * NOTE(review): @size is narrowed into "unsigned int leftover" - fine for
 * realistic microcode files, but truncates hypothetical >4GB inputs.
 */
static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
				int (*get_ucode_data)(void *, const void *, size_t))
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	u8 *ucode_ptr = data, *new_mc = NULL, *mc = NULL;
	int new_rev = uci->cpu_sig.rev;
	unsigned int leftover = size;
	unsigned int curr_mc_size = 0, new_mc_size = 0;
	unsigned int csig, cpf;

	while (leftover) {
		struct microcode_header_intel mc_header;
		unsigned int mc_size;

		if (leftover < sizeof(mc_header)) {
			pr_err("error! Truncated header in microcode data file\n");
			break;
		}

		if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header)))
			break;

		mc_size = get_totalsize(&mc_header);
		if (!mc_size || mc_size > leftover) {
			pr_err("error! Bad data in microcode data file\n");
			break;
		}

		/* For performance reasons, reuse mc area when possible */
		if (!mc || mc_size > curr_mc_size) {
			vfree(mc);
			mc = vmalloc(mc_size);
			if (!mc)
				break;
			curr_mc_size = mc_size;
		}

		if (get_ucode_data(mc, ucode_ptr, mc_size) ||
		    microcode_sanity_check(mc, 1) < 0) {
			break;
		}

		csig = uci->cpu_sig.sig;
		cpf = uci->cpu_sig.pf;
		if (has_newer_microcode(mc, csig, cpf, new_rev)) {
			/* New best candidate: take ownership of mc's buffer. */
			vfree(new_mc);
			new_rev = mc_header.rev;
			new_mc  = mc;
			new_mc_size = mc_size;
			mc = NULL;	/* trigger new vmalloc */
		}

		ucode_ptr += mc_size;
		leftover  -= mc_size;
	}

	vfree(mc);

	/* Bytes left over => the container was malformed; discard everything. */
	if (leftover) {
		vfree(new_mc);
		return UCODE_ERROR;
	}

	if (!new_mc)
		return UCODE_NFOUND;

	vfree(uci->mc);
	uci->mc = (struct microcode_intel *)new_mc;

	/*
	 * If early loading microcode is supported, save this mc into
	 * permanent memory. So it will be loaded early when a CPU is hot added
	 * or resumes.
	 */
	save_mc_for_early(new_mc, new_mc_size);

	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
		 cpu, new_rev, uci->cpu_sig.rev);

	return UCODE_OK;
}

D
Dmitry Adamushko 已提交
903 904 905 906 907
/*
 * Copy callback for generic_load_microcode() when the source is firmware
 * loader data already in kernel memory: a plain memcpy(), which cannot
 * fail, so 0 (success) is returned unconditionally.
 */
static int get_ucode_fw(void *to, const void *from, size_t n)
{
	memcpy(to, from, n);

	return 0;
}
908

909 910 911 912
/*
 * Check whether late microcode loading must be refused on @cpu because of
 * a known erratum.  Returns true when loading is disallowed.
 */
static bool is_blacklisted(unsigned int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	/*
	 * Late loading on model 79 with microcode revision less than 0x0b000021
	 * may result in a system hang. This behavior is documented in item
	 * BDF90, #334165 (Intel Xeon Processor E7-8800/4800 v4 Product Family).
	 */
	if (c->x86 == 6 &&
	    c->x86_model == INTEL_FAM6_BROADWELL_X &&
	    c->x86_mask == 0x01 &&
	    c->microcode < 0x0b000021) {
		pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
		pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
		return true;
	}

	return false;
}

930 931
/*
 * microcode_ops callback: request the "intel-ucode/FF-MM-SS" firmware file
 * for @cpu from the firmware loader and feed it to generic_load_microcode()
 * with the kernel-memory copy callback.  @refresh_fw is unused here.
 * Returns UCODE_NFOUND when blacklisted or the file is unavailable.
 */
static enum ucode_state request_microcode_fw(int cpu, struct device *device,
					     bool refresh_fw)
{
	char name[30];
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	const struct firmware *firmware;
	enum ucode_state ret;

	if (is_blacklisted(cpu))
		return UCODE_NFOUND;

	sprintf(name, "intel-ucode/%02x-%02x-%02x",
		c->x86, c->x86_model, c->x86_mask);

	if (request_firmware_direct(&firmware, name, device)) {
		pr_debug("data file %s load failed\n", name);
		return UCODE_NFOUND;
	}

	ret = generic_load_microcode(cpu, (void *)firmware->data,
				     firmware->size, &get_ucode_fw);

	release_firmware(firmware);

	return ret;
}

/*
 * Copy callback for generic_load_microcode() when the source buffer is in
 * user space.  copy_from_user() returns the number of bytes NOT copied,
 * so any non-zero return is treated as failure by the caller.
 */
static int get_ucode_user(void *to, const void *from, size_t n)
{
	return copy_from_user(to, from, n);
}

962 963
/*
 * microcode_ops callback: load microcode for @cpu from a user-space buffer
 * (@buf, @size) via the user-copy callback.  Returns UCODE_NFOUND when the
 * CPU is blacklisted for late loading.
 */
static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
	if (is_blacklisted(cpu))
		return UCODE_NFOUND;

	return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
}

H
Hannes Eder 已提交
971
/* Intel implementation of the arch microcode driver operations. */
static struct microcode_ops microcode_intel_ops = {
	.request_microcode_user		  = request_microcode_user,
	.request_microcode_fw             = request_microcode_fw,
	.collect_cpu_info                 = collect_cpu_info,
	.apply_microcode                  = apply_microcode_intel,
};

978
/*
 * Entry point for the generic microcode core: return the Intel ops table,
 * or NULL if this CPU is not a supported Intel part (family >= 6 required;
 * Itanium-emulating CPUs excluded).
 */
struct microcode_ops * __init init_intel_microcode(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
	    cpu_has(c, X86_FEATURE_IA64)) {
		pr_err("Intel CPU family 0x%x not supported\n", c->x86);
		return NULL;
	}

	return &microcode_intel_ops;
}