Commit 3a5dc1fa authored by Linus Torvalds

Merge branch 'x86-microcode-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 microcode loading updates from Ingo Molnar:
 "The main changes in this cycle are:

   - Reload microcode when resuming and the case when only the early
     loader has been utilized.  (Borislav Petkov)

   - Also, do not load the driver on paravirt guests.  (Boris
     Ostrovsky)"

* 'x86-microcode-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/microcode/intel: Fish out the stashed microcode for the BSP
  x86, microcode: Reload microcode on resume
  x86, microcode: Don't initialize microcode code on paravirt
  x86, microcode, intel: Drop unused parameter
  x86, microcode, AMD: Do not use smp_processor_id() in preemptible context
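
For orientation before the hunks below: the series routes the resume path through a new vendor dispatch, reload_early_microcode(), instead of the old X86_64-only load_ucode_ap() fallback. The following is a minimal standalone sketch of that dispatch logic only (plain userspace C, not kernel code; the stub functions, enum values, and main() driver are illustrative assumptions, the family thresholds come from the diff):

/*
 * Standalone model of the vendor dispatch added by reload_early_microcode():
 * Intel family >= 6 and AMD family >= 0x10 get their stashed early patch
 * re-applied on resume; everything else is skipped.
 */
#include <stdio.h>

enum vendor { VENDOR_INTEL, VENDOR_AMD, VENDOR_OTHER };

/* Stand-ins for the real per-vendor reload routines. */
static void reload_ucode_intel_stub(void) { puts("reload Intel microcode"); }
static void reload_ucode_amd_stub(void)   { puts("reload AMD microcode"); }

static void reload_early_microcode_model(enum vendor vendor, int family)
{
        switch (vendor) {
        case VENDOR_INTEL:
                if (family >= 6)
                        reload_ucode_intel_stub();
                break;
        case VENDOR_AMD:
                if (family >= 0x10)
                        reload_ucode_amd_stub();
                break;
        default:
                break;
        }
}

int main(void)
{
        reload_early_microcode_model(VENDOR_INTEL, 6);   /* reloads */
        reload_early_microcode_model(VENDOR_AMD, 0x0f);  /* family too old, skipped */
        return 0;
}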
@@ -78,6 +78,7 @@ static inline void __exit exit_amd_microcode(void) {}
 extern void __init load_ucode_bsp(void);
 extern void load_ucode_ap(void);
 extern int __init save_microcode_in_initrd(void);
+void reload_early_microcode(void);
 #else
 static inline void __init load_ucode_bsp(void) {}
 static inline void load_ucode_ap(void) {}
@@ -85,6 +86,7 @@ static inline int __init save_microcode_in_initrd(void)
 {
         return 0;
 }
+static inline void reload_early_microcode(void) {}
 #endif
 #endif /* _ASM_X86_MICROCODE_H */
@@ -59,7 +59,7 @@ static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table,
 extern int __apply_microcode_amd(struct microcode_amd *mc_amd);
 extern int apply_microcode_amd(int cpu);
-extern enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
+extern enum ucode_state load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);
 #define PATCH_MAX_SIZE PAGE_SIZE
 extern u8 amd_ucode_patch[PATCH_MAX_SIZE];
@@ -68,10 +68,12 @@ extern u8 amd_ucode_patch[PATCH_MAX_SIZE];
 extern void __init load_ucode_amd_bsp(void);
 extern void load_ucode_amd_ap(void);
 extern int __init save_microcode_in_initrd_amd(void);
+void reload_ucode_amd(void);
 #else
 static inline void __init load_ucode_amd_bsp(void) {}
 static inline void load_ucode_amd_ap(void) {}
 static inline int __init save_microcode_in_initrd_amd(void) { return -EINVAL; }
+void reload_ucode_amd(void) {}
 #endif
 #endif /* _ASM_X86_MICROCODE_AMD_H */
@@ -68,11 +68,13 @@ extern void __init load_ucode_intel_bsp(void);
 extern void load_ucode_intel_ap(void);
 extern void show_ucode_info_early(void);
 extern int __init save_microcode_in_initrd_intel(void);
+void reload_ucode_intel(void);
 #else
 static inline __init void load_ucode_intel_bsp(void) {}
 static inline void load_ucode_intel_ap(void) {}
 static inline void show_ucode_info_early(void) {}
 static inline int __init save_microcode_in_initrd_intel(void) { return -EINVAL; }
+static inline void reload_ucode_intel(void) {}
 #endif

 #if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU)
@@ -376,7 +376,7 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
         return UCODE_OK;
 }

-enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
+enum ucode_state load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
 {
         enum ucode_state ret;
@@ -390,8 +390,8 @@ enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
 #if defined(CONFIG_MICROCODE_AMD_EARLY) && defined(CONFIG_X86_32)
         /* save BSP's matching patch for early load */
-        if (cpu_data(smp_processor_id()).cpu_index == boot_cpu_data.cpu_index) {
-                struct ucode_patch *p = find_patch(smp_processor_id());
+        if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) {
+                struct ucode_patch *p = find_patch(cpu);
                 if (p) {
                         memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
                         memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
                                                                PATCH_MAX_SIZE));
@@ -444,7 +444,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
                 goto fw_release;
         }

-        ret = load_microcode_amd(c->x86, fw->data, fw->size);
+        ret = load_microcode_amd(cpu, c->x86, fw->data, fw->size);

 fw_release:
         release_firmware(fw);
@@ -389,7 +389,7 @@ int __init save_microcode_in_initrd_amd(void)
         eax = cpuid_eax(0x00000001);
         eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);

-        ret = load_microcode_amd(eax, container, container_size);
+        ret = load_microcode_amd(smp_processor_id(), eax, container, container_size);
         if (ret != UCODE_OK)
                 retval = -EINVAL;
@@ -402,3 +402,21 @@ int __init save_microcode_in_initrd_amd(void)
         return retval;
 }
+
+void reload_ucode_amd(void)
+{
+        struct microcode_amd *mc;
+        u32 rev, eax;
+
+        rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);
+
+        mc = (struct microcode_amd *)amd_ucode_patch;
+
+        if (mc && rev < mc->hdr.patch_id) {
+                if (!__apply_microcode_amd(mc)) {
+                        ucode_new_rev = mc->hdr.patch_id;
+                        pr_info("microcode: reload patch_level=0x%08x\n",
+                                ucode_new_rev);
+                }
+        }
+}
@@ -465,16 +465,8 @@ static void mc_bp_resume(void)
         if (uci->valid && uci->mc)
                 microcode_ops->apply_microcode(cpu);
-#ifdef CONFIG_X86_64
         else if (!uci->mc)
-                /*
-                 * We might resume and not have applied late microcode but still
-                 * have a newer patch stashed from the early loader. We don't
-                 * have it in uci->mc so we have to load it the same way we're
-                 * applying patches early on the APs.
-                 */
-                load_ucode_ap();
-#endif
+                reload_early_microcode();
 }

 static struct syscore_ops mc_syscore_ops = {
@@ -559,7 +551,7 @@ static int __init microcode_init(void)
         struct cpuinfo_x86 *c = &cpu_data(0);
         int error;

-        if (dis_ucode_ldr)
+        if (paravirt_enabled() || dis_ucode_ldr)
                 return 0;

         if (c->x86_vendor == X86_VENDOR_INTEL)
@@ -176,3 +176,24 @@ int __init save_microcode_in_initrd(void)
         return 0;
 }
+
+void reload_early_microcode(void)
+{
+        int vendor, x86;
+
+        vendor = x86_vendor();
+        x86 = x86_family();
+
+        switch (vendor) {
+        case X86_VENDOR_INTEL:
+                if (x86 >= 6)
+                        reload_ucode_intel();
+                break;
+        case X86_VENDOR_AMD:
+                if (x86 >= 0x10)
+                        reload_ucode_amd();
+                break;
+        default:
+                break;
+        }
+}
@@ -650,8 +650,7 @@ static inline void print_ucode(struct ucode_cpu_info *uci)
 }
 #endif

-static int apply_microcode_early(struct mc_saved_data *mc_saved_data,
-                                 struct ucode_cpu_info *uci)
+static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
 {
         struct microcode_intel *mc_intel;
         unsigned int val[2];
@@ -680,7 +679,10 @@ static int apply_microcode_early(struct mc_saved_data *mc_saved_data,
 #endif
         uci->cpu_sig.rev = val[1];

-        print_ucode(uci);
+        if (early)
+                print_ucode(uci);
+        else
+                print_ucode_info(uci, mc_intel->hdr.date);

         return 0;
 }
@@ -715,12 +717,17 @@ _load_ucode_intel_bsp(struct mc_saved_data *mc_saved_data,
                       unsigned long initrd_end_early,
                       struct ucode_cpu_info *uci)
 {
+        enum ucode_state ret;
+
         collect_cpu_info_early(uci);
         scan_microcode(initrd_start_early, initrd_end_early, mc_saved_data,
                        mc_saved_in_initrd, uci);
-        load_microcode(mc_saved_data, mc_saved_in_initrd,
-                       initrd_start_early, uci);
-        apply_microcode_early(mc_saved_data, uci);
+
+        ret = load_microcode(mc_saved_data, mc_saved_in_initrd,
+                             initrd_start_early, uci);
+
+        if (ret == UCODE_OK)
+                apply_microcode_early(uci, true);
 }

 void __init
@@ -749,7 +756,8 @@ load_ucode_intel_bsp(void)
         initrd_end_early = initrd_start_early + ramdisk_size;

         _load_ucode_intel_bsp(&mc_saved_data, mc_saved_in_initrd,
-                              initrd_start_early, initrd_end_early, &uci);
+                              initrd_start_early, initrd_end_early,
+                              &uci);
 #endif
 }
@@ -783,5 +791,23 @@ void load_ucode_intel_ap(void)
         collect_cpu_info_early(&uci);
         load_microcode(mc_saved_data_p, mc_saved_in_initrd_p,
                        initrd_start_addr, &uci);
-        apply_microcode_early(mc_saved_data_p, &uci);
+        apply_microcode_early(&uci, true);
 }
+
+void reload_ucode_intel(void)
+{
+        struct ucode_cpu_info uci;
+        enum ucode_state ret;
+
+        if (!mc_saved_data.mc_saved_count)
+                return;
+
+        collect_cpu_info_early(&uci);
+
+        ret = generic_load_microcode_early(mc_saved_data.mc_saved,
+                                           mc_saved_data.mc_saved_count, &uci);
+        if (ret != UCODE_OK)
+                return;
+
+        apply_microcode_early(&uci, false);
+}