/*
 * Intel CPU Microcode Update Driver for Linux
 *
 * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
 *		 2006 Shaohua Li <shaohua.li@intel.com>
 *
 * Intel CPU microcode early update for Linux
 *
 * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
 *		      H Peter Anvin <hpa@zytor.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/*
 * This needs to be before all headers so that pr_debug in printk.h doesn't turn
 * printk calls into no_printk().
 *
 *#define DEBUG
 */
24
#define pr_fmt(fmt) "microcode: " fmt
25

26
#include <linux/earlycpio.h>
I
Ingo Molnar 已提交
27 28
#include <linux/firmware.h>
#include <linux/uaccess.h>
29 30
#include <linux/vmalloc.h>
#include <linux/initrd.h>
I
Ingo Molnar 已提交
31
#include <linux/kernel.h>
32 33 34
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/mm.h>
L
Linus Torvalds 已提交
35

36
#include <asm/microcode_intel.h>
37
#include <asm/intel-family.h>
I
Ingo Molnar 已提交
38
#include <asm/processor.h>
39 40
#include <asm/tlbflush.h>
#include <asm/setup.h>
I
Ingo Molnar 已提交
41
#include <asm/msr.h>
L
Linus Torvalds 已提交
42

43
static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
44

45
/* Current microcode patch used in early patching on the APs. */
46
static struct microcode_intel *intel_ucode_patch;
47

48 49 50
/* last level cache size per core */
static int llc_size_per_core;

51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105
static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
					unsigned int s2, unsigned int p2)
{
	if (s1 != s2)
		return false;

	/* Processor flags are either both 0 ... */
	if (!p1 && !p2)
		return true;

	/* ... or they intersect. */
	return p1 & p2;
}

/*
 * Returns 1 if update has been found, 0 otherwise.
 */
static int find_matching_signature(void *mc, unsigned int csig, int cpf)
{
	struct microcode_header_intel *mc_hdr = mc;
	struct extended_sigtable *ext_hdr;
	struct extended_signature *ext_sig;
	int i;

	if (cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf))
		return 1;

	/* Look for ext. headers: */
	if (get_totalsize(mc_hdr) <= get_datasize(mc_hdr) + MC_HEADER_SIZE)
		return 0;

	ext_hdr = mc + get_datasize(mc_hdr) + MC_HEADER_SIZE;
	ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE;

	for (i = 0; i < ext_hdr->count; i++) {
		if (cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf))
			return 1;
		ext_sig++;
	}
	return 0;
}

/*
 * Returns 1 if update has been found, 0 otherwise.
 */
static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev)
{
	struct microcode_header_intel *mc_hdr = mc;

	if (mc_hdr->rev <= new_rev)
		return 0;

	return find_matching_signature(mc, csig, cpf);
}

106 107 108
/*
 * Given CPU signature and a microcode patch, this function finds if the
 * microcode patch has matching family and model with the CPU.
109 110 111
 *
 * %true - if there's a match
 * %false - otherwise
112
 */
113 114
static bool microcode_matches(struct microcode_header_intel *mc_header,
			      unsigned long sig)
115 116 117
{
	unsigned long total_size = get_totalsize(mc_header);
	unsigned long data_size = get_datasize(mc_header);
118 119
	struct extended_sigtable *ext_header;
	unsigned int fam_ucode, model_ucode;
120
	struct extended_signature *ext_sig;
121 122
	unsigned int fam, model;
	int ext_sigcount, i;
123

124
	fam   = x86_family(sig);
125 126
	model = x86_model(sig);

127
	fam_ucode   = x86_family(mc_header->sig);
128 129 130
	model_ucode = x86_model(mc_header->sig);

	if (fam == fam_ucode && model == model_ucode)
131
		return true;
132 133 134

	/* Look for ext. headers: */
	if (total_size <= data_size + MC_HEADER_SIZE)
135
		return false;
136 137 138 139 140 141

	ext_header   = (void *) mc_header + data_size + MC_HEADER_SIZE;
	ext_sig      = (void *)ext_header + EXT_HEADER_SIZE;
	ext_sigcount = ext_header->count;

	for (i = 0; i < ext_sigcount; i++) {
142
		fam_ucode   = x86_family(ext_sig->sig);
143 144 145
		model_ucode = x86_model(ext_sig->sig);

		if (fam == fam_ucode && model == model_ucode)
146
			return true;
147 148 149

		ext_sig++;
	}
150
	return false;
151 152
}

153
static struct ucode_patch *memdup_patch(void *data, unsigned int size)
154
{
155
	struct ucode_patch *p;
156

157
	p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL);
158
	if (!p)
159
		return NULL;
160

161 162 163
	p->data = kmemdup(data, size, GFP_KERNEL);
	if (!p->data) {
		kfree(p);
164
		return NULL;
165 166
	}

167
	return p;
168 169
}

170
static void save_microcode_patch(void *data, unsigned int size)
171 172
{
	struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
173
	struct ucode_patch *iter, *tmp, *p = NULL;
174
	bool prev_found = false;
175 176
	unsigned int sig, pf;

177
	mc_hdr = (struct microcode_header_intel *)data;
178

179 180
	list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
		mc_saved_hdr = (struct microcode_header_intel *)iter->data;
181 182 183
		sig	     = mc_saved_hdr->sig;
		pf	     = mc_saved_hdr->pf;

184 185
		if (find_matching_signature(data, sig, pf)) {
			prev_found = true;
186

187 188
			if (mc_hdr->rev <= mc_saved_hdr->rev)
				continue;
189

190 191
			p = memdup_patch(data, size);
			if (!p)
192 193 194 195
				pr_err("Error allocating buffer %p\n", data);
			else
				list_replace(&iter->plist, &p->plist);
		}
196 197
	}

198 199 200 201 202
	/*
	 * There weren't any previous patches found in the list cache; save the
	 * newly found.
	 */
	if (!prev_found) {
203 204
		p = memdup_patch(data, size);
		if (!p)
205 206 207 208
			pr_err("Error allocating buffer for %p\n", data);
		else
			list_add_tail(&p->plist, &microcode_cache);
	}
209

210 211 212
	if (!p)
		return;

213 214 215 216 217
	/*
	 * Save for early loading. On 32-bit, that needs to be a physical
	 * address as the APs are running from physical addresses, before
	 * paging has been enabled.
	 */
218 219 220 221
	if (IS_ENABLED(CONFIG_X86_32))
		intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data);
	else
		intel_ucode_patch = p->data;
222 223
}

224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321
static int microcode_sanity_check(void *mc, int print_err)
{
	unsigned long total_size, data_size, ext_table_size;
	struct microcode_header_intel *mc_header = mc;
	struct extended_sigtable *ext_header = NULL;
	u32 sum, orig_sum, ext_sigcount = 0, i;
	struct extended_signature *ext_sig;

	total_size = get_totalsize(mc_header);
	data_size = get_datasize(mc_header);

	if (data_size + MC_HEADER_SIZE > total_size) {
		if (print_err)
			pr_err("Error: bad microcode data file size.\n");
		return -EINVAL;
	}

	if (mc_header->ldrver != 1 || mc_header->hdrver != 1) {
		if (print_err)
			pr_err("Error: invalid/unknown microcode update format.\n");
		return -EINVAL;
	}

	ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
	if (ext_table_size) {
		u32 ext_table_sum = 0;
		u32 *ext_tablep;

		if ((ext_table_size < EXT_HEADER_SIZE)
		 || ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) {
			if (print_err)
				pr_err("Error: truncated extended signature table.\n");
			return -EINVAL;
		}

		ext_header = mc + MC_HEADER_SIZE + data_size;
		if (ext_table_size != exttable_size(ext_header)) {
			if (print_err)
				pr_err("Error: extended signature table size mismatch.\n");
			return -EFAULT;
		}

		ext_sigcount = ext_header->count;

		/*
		 * Check extended table checksum: the sum of all dwords that
		 * comprise a valid table must be 0.
		 */
		ext_tablep = (u32 *)ext_header;

		i = ext_table_size / sizeof(u32);
		while (i--)
			ext_table_sum += ext_tablep[i];

		if (ext_table_sum) {
			if (print_err)
				pr_warn("Bad extended signature table checksum, aborting.\n");
			return -EINVAL;
		}
	}

	/*
	 * Calculate the checksum of update data and header. The checksum of
	 * valid update data and header including the extended signature table
	 * must be 0.
	 */
	orig_sum = 0;
	i = (MC_HEADER_SIZE + data_size) / sizeof(u32);
	while (i--)
		orig_sum += ((u32 *)mc)[i];

	if (orig_sum) {
		if (print_err)
			pr_err("Bad microcode data checksum, aborting.\n");
		return -EINVAL;
	}

	if (!ext_table_size)
		return 0;

	/*
	 * Check extended signature checksum: 0 => valid.
	 */
	for (i = 0; i < ext_sigcount; i++) {
		ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
			  EXT_SIGNATURE_SIZE * i;

		sum = (mc_header->sig + mc_header->pf + mc_header->cksum) -
		      (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
		if (sum) {
			if (print_err)
				pr_err("Bad extended signature checksum, aborting.\n");
			return -EINVAL;
		}
	}
	return 0;
}

322 323 324 325
/*
 * Get microcode matching with BSP's model. Only CPUs with the same model as
 * BSP can stay in the platform.
 */
326 327
static struct microcode_intel *
scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save)
328
{
329
	struct microcode_header_intel *mc_header;
330
	struct microcode_intel *patch = NULL;
331
	unsigned int mc_size;
332

333 334
	while (size) {
		if (size < sizeof(struct microcode_header_intel))
335 336
			break;

337
		mc_header = (struct microcode_header_intel *)data;
338 339

		mc_size = get_totalsize(mc_header);
340 341 342
		if (!mc_size ||
		    mc_size > size ||
		    microcode_sanity_check(data, 0) < 0)
343 344
			break;

345
		size -= mc_size;
346

347 348
		if (!microcode_matches(mc_header, uci->cpu_sig.sig)) {
			data += mc_size;
349 350 351
			continue;
		}

352 353 354 355
		if (save) {
			save_microcode_patch(data, mc_size);
			goto next;
		}
356 357


358 359 360 361 362 363
		if (!patch) {
			if (!has_newer_microcode(data,
						 uci->cpu_sig.sig,
						 uci->cpu_sig.pf,
						 uci->cpu_sig.rev))
				goto next;
364

365 366 367 368 369 370 371 372 373
		} else {
			struct microcode_header_intel *phdr = &patch->hdr;

			if (!has_newer_microcode(data,
						 phdr->sig,
						 phdr->pf,
						 phdr->rev))
				goto next;
		}
374

375 376
		/* We have a newer patch, save it. */
		patch = data;
377

378 379 380
next:
		data += mc_size;
	}
381

382 383 384 385
	if (size)
		return NULL;

	return patch;
386 387 388 389 390 391
}

static int collect_cpu_info_early(struct ucode_cpu_info *uci)
{
	unsigned int val[2];
	unsigned int family, model;
392
	struct cpu_signature csig = { 0 };
393 394 395 396 397 398 399 400 401
	unsigned int eax, ebx, ecx, edx;

	memset(uci, 0, sizeof(*uci));

	eax = 0x00000001;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	csig.sig = eax;

402 403
	family = x86_family(eax);
	model  = x86_model(eax);
404 405 406 407 408 409 410

	if ((model >= 5) || (family > 6)) {
		/* get processor flags from MSR 0x17 */
		native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
		csig.pf = 1 << ((val[1] >> 18) & 7);
	}

411
	csig.rev = intel_get_microcode_revision();
412 413 414 415 416 417 418 419 420

	uci->cpu_sig = csig;
	uci->valid = 1;

	return 0;
}

static void show_saved_mc(void)
{
421
#ifdef DEBUG
422
	int i = 0, j;
423 424
	unsigned int sig, pf, rev, total_size, data_size, date;
	struct ucode_cpu_info uci;
425
	struct ucode_patch *p;
426

427
	if (list_empty(&microcode_cache)) {
428 429 430 431 432 433
		pr_debug("no microcode data saved.\n");
		return;
	}

	collect_cpu_info_early(&uci);

434 435 436
	sig	= uci.cpu_sig.sig;
	pf	= uci.cpu_sig.pf;
	rev	= uci.cpu_sig.rev;
437 438
	pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);

439
	list_for_each_entry(p, &microcode_cache, plist) {
440 441 442
		struct microcode_header_intel *mc_saved_header;
		struct extended_sigtable *ext_header;
		struct extended_signature *ext_sig;
443 444 445 446 447 448 449 450
		int ext_sigcount;

		mc_saved_header = (struct microcode_header_intel *)p->data;

		sig	= mc_saved_header->sig;
		pf	= mc_saved_header->pf;
		rev	= mc_saved_header->rev;
		date	= mc_saved_header->date;
451

452 453
		total_size	= get_totalsize(mc_saved_header);
		data_size	= get_datasize(mc_saved_header);
454

M
Masanari Iida 已提交
455
		pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
456
			 i++, sig, pf, rev, total_size,
457 458 459 460 461 462 463 464
			 date & 0xffff,
			 date >> 24,
			 (date >> 16) & 0xff);

		/* Look for ext. headers: */
		if (total_size <= data_size + MC_HEADER_SIZE)
			continue;

465
		ext_header = (void *)mc_saved_header + data_size + MC_HEADER_SIZE;
466 467 468 469 470 471 472 473 474 475 476 477 478 479
		ext_sigcount = ext_header->count;
		ext_sig = (void *)ext_header + EXT_HEADER_SIZE;

		for (j = 0; j < ext_sigcount; j++) {
			sig = ext_sig->sig;
			pf = ext_sig->pf;

			pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n",
				 j, sig, pf);

			ext_sig++;
		}
	}
#endif
480
}
481 482

/*
483 484
 * Save this microcode patch. It will be loaded early when a CPU is
 * hot-added or resumes.
485
 */
486
static void save_mc_for_early(u8 *mc, unsigned int size)
487
{
488
#ifdef CONFIG_HOTPLUG_CPU
489
	/* Synchronization during CPU hotplug. */
490 491
	static DEFINE_MUTEX(x86_cpu_microcode_mutex);

492 493
	mutex_lock(&x86_cpu_microcode_mutex);

494
	save_microcode_patch(mc, size);
495 496 497 498
	show_saved_mc();

	mutex_unlock(&x86_cpu_microcode_mutex);
#endif
499
}
500

501
static bool load_builtin_intel_microcode(struct cpio_data *cp)
502
{
503
	unsigned int eax = 1, ebx, ecx = 0, edx;
504 505
	char name[30];

506 507 508
	if (IS_ENABLED(CONFIG_X86_32))
		return false;

509 510
	native_cpuid(&eax, &ebx, &ecx, &edx);

511 512
	sprintf(name, "intel-ucode/%02x-%02x-%02x",
		      x86_family(eax), x86_model(eax), x86_stepping(eax));
513 514 515 516 517 518 519 520 521 522

	return get_builtin_firmware(cp, name);
}

/*
 * Print ucode update info.
 */
static void
print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
{
523 524 525 526 527
	pr_info_once("microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
		     uci->cpu_sig.rev,
		     date & 0xffff,
		     date >> 24,
		     (date >> 16) & 0xff);
528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549
}

#ifdef CONFIG_X86_32

static int delay_ucode_info;
static int current_mc_date;

/*
 * Print early updated ucode info after printk works. This is delayed info dump.
 */
void show_ucode_info_early(void)
{
	struct ucode_cpu_info uci;

	if (delay_ucode_info) {
		collect_cpu_info_early(&uci);
		print_ucode_info(&uci, current_mc_date);
		delay_ucode_info = 0;
	}
}

/*
550
 * At this point, we can not call printk() yet. Delay printing microcode info in
551 552 553 554
 * show_ucode_info_early() until printk() works.
 */
static void print_ucode(struct ucode_cpu_info *uci)
{
555
	struct microcode_intel *mc;
556 557 558
	int *delay_ucode_info_p;
	int *current_mc_date_p;

559 560
	mc = uci->mc;
	if (!mc)
561 562 563 564 565 566
		return;

	delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
	current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);

	*delay_ucode_info_p = 1;
567
	*current_mc_date_p = mc->hdr.date;
568 569 570 571 572
}
#else

static inline void print_ucode(struct ucode_cpu_info *uci)
{
573
	struct microcode_intel *mc;
574

575 576
	mc = uci->mc;
	if (!mc)
577 578
		return;

579
	print_ucode_info(uci, mc->hdr.date);
580 581 582 583 584
}
#endif

static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
{
585
	struct microcode_intel *mc;
586
	u32 rev;
587

588 589
	mc = uci->mc;
	if (!mc)
590 591 592
		return 0;

	/* write microcode via MSR 0x79 */
593
	native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
594

595 596
	rev = intel_get_microcode_revision();
	if (rev != mc->hdr.rev)
597 598
		return -1;

599
	uci->cpu_sig.rev = rev;
600 601 602 603

	if (early)
		print_ucode(uci);
	else
604
		print_ucode_info(uci, mc->hdr.date);
605 606 607 608 609 610

	return 0;
}

int __init save_microcode_in_initrd_intel(void)
{
611 612
	struct ucode_cpu_info uci;
	struct cpio_data cp;
613

614 615 616 617 618 619 620 621
	/*
	 * initrd is going away, clear patch ptr. We will scan the microcode one
	 * last time before jettisoning and save a patch, if found. Then we will
	 * update that pointer too, with a stable patch address to use when
	 * resuming the cores.
	 */
	intel_ucode_patch = NULL;

622 623
	if (!load_builtin_intel_microcode(&cp))
		cp = find_microcode_in_initrd(ucode_path, false);
624

625 626
	if (!(cp.data && cp.size))
		return 0;
627

628
	collect_cpu_info_early(&uci);
629

630
	scan_microcode(cp.data, cp.size, &uci, true);
631

632
	show_saved_mc();
633

634 635
	return 0;
}
636

637 638 639 640 641 642 643 644
/*
 * @res_patch, output: a pointer to the patch we found.
 */
static struct microcode_intel *__load_ucode_intel(struct ucode_cpu_info *uci)
{
	static const char *path;
	struct cpio_data cp;
	bool use_pa;
645

646 647 648 649 650 651
	if (IS_ENABLED(CONFIG_X86_32)) {
		path	  = (const char *)__pa_nodebug(ucode_path);
		use_pa	  = true;
	} else {
		path	  = ucode_path;
		use_pa	  = false;
652 653
	}

654 655 656
	/* try built-in microcode first */
	if (!load_builtin_intel_microcode(&cp))
		cp = find_microcode_in_initrd(path, use_pa);
657

658 659
	if (!(cp.data && cp.size))
		return NULL;
660

661
	collect_cpu_info_early(uci);
662

663
	return scan_microcode(cp.data, cp.size, uci, false);
664 665
}

666
void __init load_ucode_intel_bsp(void)
667
{
668
	struct microcode_intel *patch;
669 670
	struct ucode_cpu_info uci;

671 672
	patch = __load_ucode_intel(&uci);
	if (!patch)
673 674
		return;

675
	uci.mc = patch;
676 677 678 679

	apply_microcode_early(&uci, true);
}

680
void load_ucode_intel_ap(void)
681
{
682 683
	struct microcode_intel *patch, **iup;
	struct ucode_cpu_info uci;
684

685 686 687 688 689 690 691 692 693 694
	if (IS_ENABLED(CONFIG_X86_32))
		iup = (struct microcode_intel **) __pa_nodebug(&intel_ucode_patch);
	else
		iup = &intel_ucode_patch;

reget:
	if (!*iup) {
		patch = __load_ucode_intel(&uci);
		if (!patch)
			return;
695

696 697 698 699 700 701 702 703 704 705 706
		*iup = patch;
	}

	uci.mc = *iup;

	if (apply_microcode_early(&uci, true)) {
		/* Mixed-silicon system? Try to refetch the proper patch: */
		*iup = NULL;

		goto reget;
	}
707 708
}

709
static struct microcode_intel *find_patch(struct ucode_cpu_info *uci)
710
{
711 712
	struct microcode_header_intel *phdr;
	struct ucode_patch *iter, *tmp;
713

714
	list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
715

716
		phdr = (struct microcode_header_intel *)iter->data;
717

718 719
		if (phdr->rev <= uci->cpu_sig.rev)
			continue;
720

721 722 723 724
		if (!find_matching_signature(phdr,
					     uci->cpu_sig.sig,
					     uci->cpu_sig.pf))
			continue;
725

726 727 728
		return iter->data;
	}
	return NULL;
729 730 731 732
}

void reload_ucode_intel(void)
{
733
	struct microcode_intel *p;
734 735 736 737
	struct ucode_cpu_info uci;

	collect_cpu_info_early(&uci);

738 739
	p = find_patch(&uci);
	if (!p)
740 741
		return;

742 743
	uci.mc = p;

744 745 746
	apply_microcode_early(&uci, false);
}

747
static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
L
Linus Torvalds 已提交
748
{
749
	static struct cpu_signature prev;
750
	struct cpuinfo_x86 *c = &cpu_data(cpu_num);
L
Linus Torvalds 已提交
751 752
	unsigned int val[2];

753
	memset(csig, 0, sizeof(*csig));
L
Linus Torvalds 已提交
754

755
	csig->sig = cpuid_eax(0x00000001);
756 757 758 759

	if ((c->x86_model >= 5) || (c->x86 > 6)) {
		/* get processor flags from MSR 0x17 */
		rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
760
		csig->pf = 1 << ((val[1] >> 18) & 7);
L
Linus Torvalds 已提交
761 762
	}

763
	csig->rev = c->microcode;
764 765 766 767 768 769 770

	/* No extra locking on prev, races are harmless. */
	if (csig->sig != prev.sig || csig->pf != prev.pf || csig->rev != prev.rev) {
		pr_info("sig=0x%x, pf=0x%x, revision=0x%x\n",
			csig->sig, csig->pf, csig->rev);
		prev = *csig;
	}
771 772

	return 0;
L
Linus Torvalds 已提交
773 774
}

775
static int apply_microcode_intel(int cpu)
L
Linus Torvalds 已提交
776
{
777
	struct microcode_intel *mc;
I
Ingo Molnar 已提交
778
	struct ucode_cpu_info *uci;
779
	struct cpuinfo_x86 *c;
780
	static int prev_rev;
781
	u32 rev;
I
Ingo Molnar 已提交
782

783
	/* We should bind the task to the CPU */
784
	if (WARN_ON(raw_smp_processor_id() != cpu))
785
		return -1;
786

787 788
	uci = ucode_cpu_info + cpu;
	mc = uci->mc;
789 790 791 792 793 794
	if (!mc) {
		/* Look for a newer patch in our cache: */
		mc = find_patch(uci);
		if (!mc)
			return 0;
	}
795

L
Linus Torvalds 已提交
796
	/* write microcode via MSR 0x79 */
797
	wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
798

799
	rev = intel_get_microcode_revision();
L
Linus Torvalds 已提交
800

801
	if (rev != mc->hdr.rev) {
802
		pr_err("CPU%d update to revision 0x%x failed\n",
803
		       cpu, mc->hdr.rev);
804
		return -1;
805
	}
806

807
	if (rev != prev_rev) {
808
		pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
809
			rev,
810 811 812
			mc->hdr.date & 0xffff,
			mc->hdr.date >> 24,
			(mc->hdr.date >> 16) & 0xff);
813
		prev_rev = rev;
814
	}
I
Ingo Molnar 已提交
815

816 817
	c = &cpu_data(cpu);

818 819
	uci->cpu_sig.rev = rev;
	c->microcode = rev;
820 821

	return 0;
L
Linus Torvalds 已提交
822 823
}

824 825
static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
				int (*get_ucode_data)(void *, const void *, size_t))
826
{
D
Dmitry Adamushko 已提交
827
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
828
	u8 *ucode_ptr = data, *new_mc = NULL, *mc = NULL;
D
Dmitry Adamushko 已提交
829 830
	int new_rev = uci->cpu_sig.rev;
	unsigned int leftover = size;
831
	unsigned int curr_mc_size = 0, new_mc_size = 0;
832
	unsigned int csig, cpf;
833

D
Dmitry Adamushko 已提交
834 835 836
	while (leftover) {
		struct microcode_header_intel mc_header;
		unsigned int mc_size;
837

838 839 840 841 842
		if (leftover < sizeof(mc_header)) {
			pr_err("error! Truncated header in microcode data file\n");
			break;
		}

D
Dmitry Adamushko 已提交
843 844
		if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header)))
			break;
845

D
Dmitry Adamushko 已提交
846 847
		mc_size = get_totalsize(&mc_header);
		if (!mc_size || mc_size > leftover) {
848
			pr_err("error! Bad data in microcode data file\n");
D
Dmitry Adamushko 已提交
849 850
			break;
		}
851

852 853
		/* For performance reasons, reuse mc area when possible */
		if (!mc || mc_size > curr_mc_size) {
854
			vfree(mc);
855 856 857 858 859
			mc = vmalloc(mc_size);
			if (!mc)
				break;
			curr_mc_size = mc_size;
		}
D
Dmitry Adamushko 已提交
860 861

		if (get_ucode_data(mc, ucode_ptr, mc_size) ||
862
		    microcode_sanity_check(mc, 1) < 0) {
D
Dmitry Adamushko 已提交
863 864 865
			break;
		}

866 867
		csig = uci->cpu_sig.sig;
		cpf = uci->cpu_sig.pf;
868
		if (has_newer_microcode(mc, csig, cpf, new_rev)) {
869
			vfree(new_mc);
D
Dmitry Adamushko 已提交
870 871
			new_rev = mc_header.rev;
			new_mc  = mc;
872
			new_mc_size = mc_size;
873 874
			mc = NULL;	/* trigger new vmalloc */
		}
D
Dmitry Adamushko 已提交
875 876 877

		ucode_ptr += mc_size;
		leftover  -= mc_size;
878 879
	}

880
	vfree(mc);
881

882
	if (leftover) {
883
		vfree(new_mc);
884
		return UCODE_ERROR;
885
	}
I
Ingo Molnar 已提交
886

887 888
	if (!new_mc)
		return UCODE_NFOUND;
D
Dmitry Adamushko 已提交
889

890
	vfree(uci->mc);
I
Ingo Molnar 已提交
891 892
	uci->mc = (struct microcode_intel *)new_mc;

893 894 895 896 897
	/*
	 * If early loading microcode is supported, save this mc into
	 * permanent memory. So it will be loaded early when a CPU is hot added
	 * or resumes.
	 */
898
	save_mc_for_early(new_mc, new_mc_size);
899

900 901
	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
		 cpu, new_rev, uci->cpu_sig.rev);
902 903

	return UCODE_OK;
904 905
}

D
Dmitry Adamushko 已提交
906 907 908 909 910
static int get_ucode_fw(void *to, const void *from, size_t n)
{
	memcpy(to, from, n);
	return 0;
}
911

912 913 914 915
static bool is_blacklisted(unsigned int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);

916 917
	/*
	 * Late loading on model 79 with microcode revision less than 0x0b000021
918 919 920
	 * and LLC size per core bigger than 2.5MB may result in a system hang.
	 * This behavior is documented in item BDF90, #334165 (Intel Xeon
	 * Processor E7-8800/4800 v4 Product Family).
921 922 923
	 */
	if (c->x86 == 6 &&
	    c->x86_model == INTEL_FAM6_BROADWELL_X &&
924
	    c->x86_stepping == 0x01 &&
925
	    llc_size_per_core > 2621440 &&
926 927 928
	    c->microcode < 0x0b000021) {
		pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
		pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
929 930 931 932 933 934
		return true;
	}

	return false;
}

935 936
static enum ucode_state request_microcode_fw(int cpu, struct device *device,
					     bool refresh_fw)
937 938
{
	char name[30];
939
	struct cpuinfo_x86 *c = &cpu_data(cpu);
940
	const struct firmware *firmware;
941
	enum ucode_state ret;
942

943 944 945
	if (is_blacklisted(cpu))
		return UCODE_NFOUND;

P
Peter Oruba 已提交
946
	sprintf(name, "intel-ucode/%02x-%02x-%02x",
947
		c->x86, c->x86_model, c->x86_stepping);
948

949
	if (request_firmware_direct(&firmware, name, device)) {
950
		pr_debug("data file %s load failed\n", name);
951
		return UCODE_NFOUND;
952
	}
D
Dmitry Adamushko 已提交
953

954 955
	ret = generic_load_microcode(cpu, (void *)firmware->data,
				     firmware->size, &get_ucode_fw);
D
Dmitry Adamushko 已提交
956

957 958
	release_firmware(firmware);

D
Dmitry Adamushko 已提交
959 960 961 962 963 964 965 966
	return ret;
}

static int get_ucode_user(void *to, const void *from, size_t n)
{
	return copy_from_user(to, from, n);
}

967 968
static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
D
Dmitry Adamushko 已提交
969
{
970 971 972
	if (is_blacklisted(cpu))
		return UCODE_NFOUND;

973
	return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
974 975
}

H
Hannes Eder 已提交
976
static struct microcode_ops microcode_intel_ops = {
D
Dmitry Adamushko 已提交
977 978
	.request_microcode_user		  = request_microcode_user,
	.request_microcode_fw             = request_microcode_fw,
P
Peter Oruba 已提交
979
	.collect_cpu_info                 = collect_cpu_info,
980
	.apply_microcode                  = apply_microcode_intel,
P
Peter Oruba 已提交
981 982
};

983 984
static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
{
985
	u64 llc_size = c->x86_cache_size * 1024ULL;
986 987 988 989 990 991

	do_div(llc_size, c->x86_max_cores);

	return (int)llc_size;
}

992
struct microcode_ops * __init init_intel_microcode(void)
P
Peter Oruba 已提交
993
{
994
	struct cpuinfo_x86 *c = &boot_cpu_data;
995 996 997 998 999 1000 1001

	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
	    cpu_has(c, X86_FEATURE_IA64)) {
		pr_err("Intel CPU family 0x%x not supported\n", c->x86);
		return NULL;
	}

1002 1003
	llc_size_per_core = calc_llc_size_per_core(c);

1004
	return &microcode_intel_ops;
P
Peter Oruba 已提交
1005
}