提交 24fd78a8 编写于 作者: A Aravind Gopalakrishnan 提交者: Borislav Petkov

x86/mce/amd: Introduce deferred error interrupt handler

Deferred errors indicate error conditions that were not corrected, but
require no action from S/W (or action is optional). These errors provide
info about a latent UC MCE that can occur when poisoned data is
consumed by the processor.

Processors that report these errors can be configured to generate APIC
interrupts to notify OS about the error.

Provide an interrupt handler in this patch so that OS can catch these
errors as and when they happen. Currently, we simply log the errors and
exit the handler as S/W action is not mandated.
Signed-off-by: Aravind Gopalakrishnan <Aravind.Gopalakrishnan@amd.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: x86-ml <x86@kernel.org>
Cc: linux-edac <linux-edac@vger.kernel.org>
Link: http://lkml.kernel.org/r/1430913538-1415-5-git-send-email-Aravind.Gopalakrishnan@amd.com
Signed-off-by: Borislav Petkov <bp@suse.de>
上级 7559e13f
...@@ -50,4 +50,7 @@ BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR) ...@@ -50,4 +50,7 @@ BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
BUILD_INTERRUPT(threshold_interrupt,THRESHOLD_APIC_VECTOR) BUILD_INTERRUPT(threshold_interrupt,THRESHOLD_APIC_VECTOR)
#endif #endif
#ifdef CONFIG_X86_MCE_AMD
BUILD_INTERRUPT(deferred_error_interrupt, DEFERRED_ERROR_VECTOR)
#endif
#endif #endif
...@@ -33,6 +33,9 @@ typedef struct { ...@@ -33,6 +33,9 @@ typedef struct {
#ifdef CONFIG_X86_MCE_THRESHOLD #ifdef CONFIG_X86_MCE_THRESHOLD
unsigned int irq_threshold_count; unsigned int irq_threshold_count;
#endif #endif
#ifdef CONFIG_X86_MCE_AMD
unsigned int irq_deferred_error_count;
#endif
#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN) #if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
unsigned int irq_hv_callback_count; unsigned int irq_hv_callback_count;
#endif #endif
......
...@@ -73,6 +73,7 @@ extern asmlinkage void invalidate_interrupt31(void); ...@@ -73,6 +73,7 @@ extern asmlinkage void invalidate_interrupt31(void);
extern asmlinkage void irq_move_cleanup_interrupt(void); extern asmlinkage void irq_move_cleanup_interrupt(void);
extern asmlinkage void reboot_interrupt(void); extern asmlinkage void reboot_interrupt(void);
extern asmlinkage void threshold_interrupt(void); extern asmlinkage void threshold_interrupt(void);
extern asmlinkage void deferred_error_interrupt(void);
extern asmlinkage void call_function_interrupt(void); extern asmlinkage void call_function_interrupt(void);
extern asmlinkage void call_function_single_interrupt(void); extern asmlinkage void call_function_single_interrupt(void);
...@@ -87,6 +88,7 @@ extern void trace_spurious_interrupt(void); ...@@ -87,6 +88,7 @@ extern void trace_spurious_interrupt(void);
extern void trace_thermal_interrupt(void); extern void trace_thermal_interrupt(void);
extern void trace_reschedule_interrupt(void); extern void trace_reschedule_interrupt(void);
extern void trace_threshold_interrupt(void); extern void trace_threshold_interrupt(void);
extern void trace_deferred_error_interrupt(void);
extern void trace_call_function_interrupt(void); extern void trace_call_function_interrupt(void);
extern void trace_call_function_single_interrupt(void); extern void trace_call_function_single_interrupt(void);
#define trace_irq_move_cleanup_interrupt irq_move_cleanup_interrupt #define trace_irq_move_cleanup_interrupt irq_move_cleanup_interrupt
......
...@@ -113,6 +113,7 @@ ...@@ -113,6 +113,7 @@
#define IRQ_WORK_VECTOR 0xf6 #define IRQ_WORK_VECTOR 0xf6
#define UV_BAU_MESSAGE 0xf5 #define UV_BAU_MESSAGE 0xf5
#define DEFERRED_ERROR_VECTOR 0xf4
/* Vector on which hypervisor callbacks will be delivered */ /* Vector on which hypervisor callbacks will be delivered */
#define HYPERVISOR_CALLBACK_VECTOR 0xf3 #define HYPERVISOR_CALLBACK_VECTOR 0xf3
......
...@@ -234,6 +234,9 @@ void do_machine_check(struct pt_regs *, long); ...@@ -234,6 +234,9 @@ void do_machine_check(struct pt_regs *, long);
extern void (*mce_threshold_vector)(void); extern void (*mce_threshold_vector)(void);
extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu); extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
/* Deferred error interrupt handler */
extern void (*deferred_error_int_vector)(void);
/* /*
* Thermal handler * Thermal handler
*/ */
......
...@@ -100,6 +100,12 @@ DEFINE_IRQ_VECTOR_EVENT(call_function_single); ...@@ -100,6 +100,12 @@ DEFINE_IRQ_VECTOR_EVENT(call_function_single);
*/ */
DEFINE_IRQ_VECTOR_EVENT(threshold_apic); DEFINE_IRQ_VECTOR_EVENT(threshold_apic);
/*
* deferred_error_apic - called when entering/exiting a deferred apic interrupt
* vector handler
*/
DEFINE_IRQ_VECTOR_EVENT(deferred_error_apic);
/* /*
* thermal_apic - called when entering/exiting a thermal apic interrupt * thermal_apic - called when entering/exiting a thermal apic interrupt
* vector handler * vector handler
......
...@@ -108,7 +108,8 @@ extern int panic_on_unrecovered_nmi; ...@@ -108,7 +108,8 @@ extern int panic_on_unrecovered_nmi;
void math_emulate(struct math_emu_info *); void math_emulate(struct math_emu_info *);
#ifndef CONFIG_X86_32 #ifndef CONFIG_X86_32
asmlinkage void smp_thermal_interrupt(void); asmlinkage void smp_thermal_interrupt(void);
asmlinkage void mce_threshold_interrupt(void); asmlinkage void smp_threshold_interrupt(void);
asmlinkage void smp_deferred_error_interrupt(void);
#endif #endif
extern enum ctx_state ist_enter(struct pt_regs *regs); extern enum ctx_state ist_enter(struct pt_regs *regs);
......
...@@ -12,6 +12,8 @@ ...@@ -12,6 +12,8 @@
* - added support for AMD Family 0x10 processors * - added support for AMD Family 0x10 processors
* May 2012 * May 2012
* - major scrubbing * - major scrubbing
* May 2015
* - add support for deferred error interrupts (Aravind Gopalakrishnan)
* *
* All MC4_MISCi registers are shared between multi-cores * All MC4_MISCi registers are shared between multi-cores
*/ */
...@@ -32,6 +34,7 @@ ...@@ -32,6 +34,7 @@
#include <asm/idle.h> #include <asm/idle.h>
#include <asm/mce.h> #include <asm/mce.h>
#include <asm/msr.h> #include <asm/msr.h>
#include <asm/trace/irq_vectors.h>
#define NR_BLOCKS 9 #define NR_BLOCKS 9
#define THRESHOLD_MAX 0xFFF #define THRESHOLD_MAX 0xFFF
...@@ -47,6 +50,13 @@ ...@@ -47,6 +50,13 @@
#define MASK_BLKPTR_LO 0xFF000000 #define MASK_BLKPTR_LO 0xFF000000
#define MCG_XBLK_ADDR 0xC0000400 #define MCG_XBLK_ADDR 0xC0000400
/* Deferred error settings */
#define MSR_CU_DEF_ERR 0xC0000410
#define MASK_DEF_LVTOFF 0x000000F0
#define MASK_DEF_INT_TYPE 0x00000006
#define DEF_LVT_OFF 0x2
#define DEF_INT_TYPE_APIC 0x2
static const char * const th_names[] = { static const char * const th_names[] = {
"load_store", "load_store",
"insn_fetch", "insn_fetch",
...@@ -60,6 +70,13 @@ static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks); ...@@ -60,6 +70,13 @@ static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */ static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */
static void amd_threshold_interrupt(void); static void amd_threshold_interrupt(void);
static void amd_deferred_error_interrupt(void);

/*
 * Fallback handler: fires if the deferred-error vector is taken before
 * deferred_error_interrupt_enable() has installed the real AMD handler.
 */
static void default_deferred_error_interrupt(void)
{
	pr_err("Unexpected deferred interrupt at vector %x\n", DEFERRED_ERROR_VECTOR);
}

/* Indirect handler invoked from smp_deferred_error_interrupt(). */
void (*deferred_error_int_vector)(void) = default_deferred_error_interrupt;
/* /*
* CPU Initialization * CPU Initialization
...@@ -205,6 +222,39 @@ static int setup_APIC_mce(int reserved, int new) ...@@ -205,6 +222,39 @@ static int setup_APIC_mce(int reserved, int new)
return reserved; return reserved;
} }
/*
 * Try to reserve APIC extended-LVT offset @new for the deferred-error
 * vector, unless an offset (@reserved >= 0) is already in use.
 *
 * Returns the offset now in effect: @new on a successful setup,
 * otherwise the previously reserved value.
 */
static int setup_APIC_deferred_error(int reserved, int new)
{
	if (reserved >= 0)
		return reserved;

	if (setup_APIC_eilvt(new, DEFERRED_ERROR_VECTOR, APIC_EILVT_MSG_FIX, 0))
		return reserved;

	return new;
}
/*
 * Enable APIC delivery of deferred errors on this CPU: read the
 * deferred-error configuration MSR, program the LVT offset (working
 * around BIOSes that leave it unset), install the AMD handler once the
 * offset is in place, and switch the interrupt type to APIC.
 */
static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
{
	u32 low = 0, high = 0;
	int def_offset = -1, def_new;

	if (rdmsr_safe(MSR_CU_DEF_ERR, &low, &high))
		return;

	/* LVT offset as reported by firmware (bits MASK_DEF_LVTOFF). */
	def_new = (low & MASK_DEF_LVTOFF) >> 4;
	if (!(low & MASK_DEF_LVTOFF)) {
		/* Firmware left the offset at zero; force the expected 0x2. */
		pr_err(FW_BUG "Your BIOS is not setting up LVT offset 0x2 for deferred error IRQs correctly.\n");
		def_new = DEF_LVT_OFF;
		low = (low & ~MASK_DEF_LVTOFF) | (DEF_LVT_OFF << 4);
	}

	def_offset = setup_APIC_deferred_error(def_offset, def_new);
	/* Only install the real handler if the LVT setup actually took. */
	if ((def_offset == def_new) &&
	    (deferred_error_int_vector != amd_deferred_error_interrupt))
		deferred_error_int_vector = amd_deferred_error_interrupt;

	/* Select APIC interrupt delivery for deferred errors. */
	low = (low & ~MASK_DEF_INT_TYPE) | DEF_INT_TYPE_APIC;
	wrmsr(MSR_CU_DEF_ERR, low, high);
}
/* cpu init entry point, called from mce.c with preempt off */ /* cpu init entry point, called from mce.c with preempt off */
void mce_amd_feature_init(struct cpuinfo_x86 *c) void mce_amd_feature_init(struct cpuinfo_x86 *c)
{ {
...@@ -262,6 +312,9 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) ...@@ -262,6 +312,9 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
mce_threshold_block_init(&b, offset); mce_threshold_block_init(&b, offset);
} }
} }
if (mce_flags.succor)
deferred_error_interrupt_enable(c);
} }
static void __log_error(unsigned int bank, bool threshold_err, u64 misc) static void __log_error(unsigned int bank, bool threshold_err, u64 misc)
...@@ -288,6 +341,46 @@ static void __log_error(unsigned int bank, bool threshold_err, u64 misc) ...@@ -288,6 +341,46 @@ static void __log_error(unsigned int bank, bool threshold_err, u64 misc)
wrmsrl(MSR_IA32_MCx_STATUS(bank), 0); wrmsrl(MSR_IA32_MCx_STATUS(bank), 0);
} }
/* Common body shared by the traced and non-traced interrupt entry points. */
static inline void __smp_deferred_error_interrupt(void)
{
	inc_irq_stat(irq_deferred_error_count);
	deferred_error_int_vector();
}
/* C entry point for the deferred-error APIC vector (non-traced variant). */
asmlinkage __visible void smp_deferred_error_interrupt(void)
{
	entering_irq();
	__smp_deferred_error_interrupt();
	exiting_ack_irq();
}
/*
 * Traced variant: identical to smp_deferred_error_interrupt() but emits
 * deferred_error_apic entry/exit tracepoints around the handler.
 */
asmlinkage __visible void smp_trace_deferred_error_interrupt(void)
{
	entering_irq();
	trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
	__smp_deferred_error_interrupt();
	trace_deferred_error_apic_exit(DEFERRED_ERROR_VECTOR);
	exiting_ack_irq();
}
/* APIC interrupt handler for deferred errors */
static void amd_deferred_error_interrupt(void)
{
	u64 status;
	unsigned int bank;

	/*
	 * Scan the MCA banks for the first valid, deferred error, log it
	 * and stop.  No further S/W action is mandated for deferred errors,
	 * so logging is all this handler does.
	 */
	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		rdmsrl(MSR_IA32_MCx_STATUS(bank), status);

		if (!(status & MCI_STATUS_VAL) ||
		    !(status & MCI_STATUS_DEFERRED))
			continue;

		/* false: this is a deferred error, not a threshold event. */
		__log_error(bank, false, 0);
		break;
	}
}
/* /*
* APIC Interrupt Handler * APIC Interrupt Handler
*/ */
......
...@@ -935,6 +935,11 @@ apicinterrupt THRESHOLD_APIC_VECTOR \ ...@@ -935,6 +935,11 @@ apicinterrupt THRESHOLD_APIC_VECTOR \
threshold_interrupt smp_threshold_interrupt threshold_interrupt smp_threshold_interrupt
#endif #endif
#ifdef CONFIG_X86_MCE_AMD
apicinterrupt DEFERRED_ERROR_VECTOR \
deferred_error_interrupt smp_deferred_error_interrupt
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR #ifdef CONFIG_X86_THERMAL_VECTOR
apicinterrupt THERMAL_APIC_VECTOR \ apicinterrupt THERMAL_APIC_VECTOR \
thermal_interrupt smp_thermal_interrupt thermal_interrupt smp_thermal_interrupt
......
...@@ -116,6 +116,12 @@ int arch_show_interrupts(struct seq_file *p, int prec) ...@@ -116,6 +116,12 @@ int arch_show_interrupts(struct seq_file *p, int prec)
seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count); seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
seq_puts(p, " Threshold APIC interrupts\n"); seq_puts(p, " Threshold APIC interrupts\n");
#endif #endif
#ifdef CONFIG_X86_MCE_AMD
seq_printf(p, "%*s: ", prec, "DFR");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_deferred_error_count);
seq_puts(p, " Deferred Error APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE #ifdef CONFIG_X86_MCE
seq_printf(p, "%*s: ", prec, "MCE"); seq_printf(p, "%*s: ", prec, "MCE");
for_each_online_cpu(j) for_each_online_cpu(j)
......
...@@ -135,6 +135,10 @@ static void __init apic_intr_init(void) ...@@ -135,6 +135,10 @@ static void __init apic_intr_init(void)
alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt); alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
#endif #endif
#ifdef CONFIG_X86_MCE_AMD
alloc_intr_gate(DEFERRED_ERROR_VECTOR, deferred_error_interrupt);
#endif
#ifdef CONFIG_X86_LOCAL_APIC #ifdef CONFIG_X86_LOCAL_APIC
/* self generated IPI for local APIC timer */ /* self generated IPI for local APIC timer */
alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt); alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
......
...@@ -827,6 +827,11 @@ asmlinkage __visible void __attribute__((weak)) smp_threshold_interrupt(void) ...@@ -827,6 +827,11 @@ asmlinkage __visible void __attribute__((weak)) smp_threshold_interrupt(void)
{ {
} }
/*
 * Weak no-op default; the strong definition in the AMD MCE code
 * (CONFIG_X86_MCE_AMD) overrides it.
 */
asmlinkage __visible void __attribute__((weak))
smp_deferred_error_interrupt(void)
{
}
/* /*
* 'math_state_restore()' saves the current math information in the * 'math_state_restore()' saves the current math information in the
* old math state array, and gets the new ones from the current task * old math state array, and gets the new ones from the current task
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册