diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 38d3a1a8830f7ec92d181db9d8cf2150065c9712..2dbc0bf2b9f3f7848d10025bb4bcfcf6d36a4d2d 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -151,10 +151,12 @@ extern int mce_p5_enabled;
 #ifdef CONFIG_X86_MCE
 int mcheck_init(void);
 void mcheck_cpu_init(struct cpuinfo_x86 *c);
+void mcheck_cpu_clear(struct cpuinfo_x86 *c);
 void mcheck_vendor_init_severity(void);
 #else
 static inline int mcheck_init(void) { return 0; }
 static inline void mcheck_cpu_init(struct cpuinfo_x86 *c) {}
+static inline void mcheck_cpu_clear(struct cpuinfo_x86 *c) {}
 static inline void mcheck_vendor_init_severity(void) {}
 #endif
 
@@ -181,12 +183,14 @@ DECLARE_PER_CPU(struct device *, mce_device);
 
 #ifdef CONFIG_X86_MCE_INTEL
 void mce_intel_feature_init(struct cpuinfo_x86 *c);
+void mce_intel_feature_clear(struct cpuinfo_x86 *c);
 void cmci_clear(void);
 void cmci_reenable(void);
 void cmci_rediscover(void);
 void cmci_recheck(void);
 #else
 static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { }
+static inline void mce_intel_feature_clear(struct cpuinfo_x86 *c) { }
 static inline void cmci_clear(void) {}
 static inline void cmci_reenable(void) {}
 static inline void cmci_rediscover(void) {}
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 32b586ee006a66c6661af61f92b45c4df608cc2f..ee5272d77a165521d20937befd8e93bddc4b2661 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1606,6 +1606,17 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
 	}
 }
 
+static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
+{
+	switch (c->x86_vendor) {
+	case X86_VENDOR_INTEL:
+		mce_intel_feature_clear(c);
+		break;
+	default:
+		break;
+	}
+}
+
 static void mce_start_timer(unsigned int cpu, struct timer_list *t)
 {
 	unsigned long iv = check_interval * HZ;
@@ -1672,6 +1683,25 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
 	__mcheck_cpu_init_timer();
 }
 
+/*
+ * Called for each booted CPU to clear some machine check opt-ins
+ */
+void mcheck_cpu_clear(struct cpuinfo_x86 *c)
+{
+	if (mca_cfg.disabled)
+		return;
+
+	if (!mce_available(c))
+		return;
+
+	/*
+	 * Possibly clear general settings generic to x86:
+	 * __mcheck_cpu_clear_generic(c);
+	 */
+	__mcheck_cpu_clear_vendor(c);
+
+}
+
 /*
  * mce_chrdev: Character device /dev/mcelog to read and clear the MCE log.
  */
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 70f567f774ed0fec3c3b936498d0f75b10969d92..c5c003291861ff521f61bd881a080365db134376 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -434,7 +434,7 @@ static void intel_init_cmci(void)
 	cmci_recheck();
 }
 
-void intel_init_lmce(void)
+static void intel_init_lmce(void)
 {
 	u64 val;
 
@@ -447,9 +447,26 @@ void intel_init_lmce(void)
 		wrmsrl(MSR_IA32_MCG_EXT_CTL, val | MCG_EXT_CTL_LMCE_EN);
 }
 
+static void intel_clear_lmce(void)
+{
+	u64 val;
+
+	if (!lmce_supported())
+		return;
+
+	rdmsrl(MSR_IA32_MCG_EXT_CTL, val);
+	val &= ~MCG_EXT_CTL_LMCE_EN;
+	wrmsrl(MSR_IA32_MCG_EXT_CTL, val);
+}
+
 void mce_intel_feature_init(struct cpuinfo_x86 *c)
 {
 	intel_init_thermal(c);
 	intel_init_cmci();
 	intel_init_lmce();
 }
+
+void mce_intel_feature_clear(struct cpuinfo_x86 *c)
+{
+	intel_clear_lmce();
+}
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 397688beed4be5ce7d9445d7847d44613d2d84b5..b20ef187ff41e16e474f85d860c837d52c86a241 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -29,6 +29,7 @@
 #include
 #include
 #include
+#include <asm/mce.h>
 
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
@@ -319,6 +320,7 @@
 	 */
 	set_cpu_online(smp_processor_id(), false);
 	disable_local_APIC();
+	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
 
 	for (;;)
 		halt();
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 15aaa69bbb5eff9596e49000b38b90abce636052..12c8286206ce27c1627b7202836d7d5419a2b4b6 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -30,6 +30,7 @@
 #include
 #include
 #include
+#include <asm/mce.h>
 #include
 /*
  * Some notes on x86 processor bugs affecting SMP operation:
@@ -243,6 +244,7 @@ static void native_stop_other_cpus(int wait)
 finish:
 	local_irq_save(flags);
 	disable_local_APIC();
+	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
 	local_irq_restore(flags);
 }
 