diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index c98aa9aed056e43335d378af904334d2351810dd..94a8547d915b80ab7ccfbbd569738490192939dd 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -578,6 +578,7 @@ enum vmx_l1d_flush_state {
 	VMENTER_L1D_FLUSH_NEVER,
 	VMENTER_L1D_FLUSH_COND,
 	VMENTER_L1D_FLUSH_ALWAYS,
+	VMENTER_L1D_FLUSH_EPT_DISABLED,
 };
 
 extern enum vmx_l1d_flush_state l1tf_vmx_mitigation;
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 8aba7d3abccb503464d8db379b013dee4a52ba69..1b3dc586d29ded834b65090c50205eeaf8499f8d 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -697,10 +697,11 @@ static void __init l1tf_select_mitigation(void)
 
 #if IS_ENABLED(CONFIG_KVM_INTEL)
 static const char *l1tf_vmx_states[] = {
-	[VMENTER_L1D_FLUSH_AUTO]	= "auto",
-	[VMENTER_L1D_FLUSH_NEVER]	= "vulnerable",
-	[VMENTER_L1D_FLUSH_COND]	= "conditional cache flushes",
-	[VMENTER_L1D_FLUSH_ALWAYS]	= "cache flushes",
+	[VMENTER_L1D_FLUSH_AUTO]		= "auto",
+	[VMENTER_L1D_FLUSH_NEVER]		= "vulnerable",
+	[VMENTER_L1D_FLUSH_COND]		= "conditional cache flushes",
+	[VMENTER_L1D_FLUSH_ALWAYS]		= "cache flushes",
+	[VMENTER_L1D_FLUSH_EPT_DISABLED]	= "EPT disabled",
 };
 
 static ssize_t l1tf_show_state(char *buf)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e553e43a114cde219c0f90a30cbe2364e8b205cb..78593550059bd5c3cd7d31cb6b5454e73359acce 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -13204,6 +13204,11 @@ static int __init vmx_setup_l1d_flush(void)
 	if (!boot_cpu_has_bug(X86_BUG_L1TF))
 		return 0;
 
+	if (!enable_ept) {
+		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
+		return 0;
+	}
+
 	l1tf_vmx_mitigation = vmentry_l1d_flush;
 
 	if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER)
@@ -13230,6 +13235,41 @@ static void vmx_cleanup_l1d_flush(void)
 	l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
 }
 
+static void vmx_exit(void)
+{
+#ifdef CONFIG_KEXEC_CORE
+	RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
+	synchronize_rcu();
+#endif
+
+	kvm_exit();
+
+#if IS_ENABLED(CONFIG_HYPERV)
+	if (static_branch_unlikely(&enable_evmcs)) {
+		int cpu;
+		struct hv_vp_assist_page *vp_ap;
+		/*
+		 * Reset everything to support using non-enlightened VMCS
+		 * access later (e.g. when we reload the module with
+		 * enlightened_vmcs=0)
+		 */
+		for_each_online_cpu(cpu) {
+			vp_ap = hv_get_vp_assist_page(cpu);
+
+			if (!vp_ap)
+				continue;
+
+			vp_ap->current_nested_vmcs = 0;
+			vp_ap->enlighten_vmentry = 0;
+		}
+
+		static_branch_disable(&enable_evmcs);
+	}
+#endif
+	vmx_cleanup_l1d_flush();
+}
+module_exit(vmx_exit);
+
 static int __init vmx_init(void)
 {
 	int r;
@@ -13263,14 +13303,17 @@ static int __init vmx_init(void)
 	}
 #endif
 
-	r = vmx_setup_l1d_flush();
+	r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
+		     __alignof__(struct vcpu_vmx), THIS_MODULE);
 	if (r)
 		return r;
 
-	r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
-		     __alignof__(struct vcpu_vmx), THIS_MODULE);
+	/*
+	 * Must be called after kvm_init() so enable_ept is properly set up
+	 */
+	r = vmx_setup_l1d_flush();
 	if (r) {
-		vmx_cleanup_l1d_flush();
+		vmx_exit();
 		return r;
 	}
 
@@ -13282,40 +13325,4 @@ static int __init vmx_init(void)
 
 	return 0;
 }
-
-static void __exit vmx_exit(void)
-{
-#ifdef CONFIG_KEXEC_CORE
-	RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
-	synchronize_rcu();
-#endif
-
-	kvm_exit();
-
-#if IS_ENABLED(CONFIG_HYPERV)
-	if (static_branch_unlikely(&enable_evmcs)) {
-		int cpu;
-		struct hv_vp_assist_page *vp_ap;
-		/*
-		 * Reset everything to support using non-enlightened VMCS
-		 * access later (e.g. when we reload the module with
-		 * enlightened_vmcs=0)
-		 */
-		for_each_online_cpu(cpu) {
-			vp_ap = hv_get_vp_assist_page(cpu);
-
-			if (!vp_ap)
-				continue;
-
-			vp_ap->current_nested_vmcs = 0;
-			vp_ap->enlighten_vmentry = 0;
-		}
-
-		static_branch_disable(&enable_evmcs);
-	}
-#endif
-	vmx_cleanup_l1d_flush();
-}
-
-module_init(vmx_init)
-module_exit(vmx_exit)
+module_init(vmx_init);
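
Note on the reordering above: per the in-diff comment, vmx_setup_l1d_flush() must run after kvm_init() so that enable_ept holds its final value (hardware setup during kvm_init() can clear it on CPUs without EPT), and since the setup step now fails after kvm_init() has succeeded, the error path has to unwind everything via the full vmx_exit() rather than just vmx_cleanup_l1d_flush() — which is also why vmx_exit() loses its __exit annotation and moves above vmx_init(). The following is a minimal userspace C sketch of that init-ordering/unwind pattern, not the real KVM code; core_init(), feature_setup(), mod_init() and mod_exit() are hypothetical stand-ins for kvm_init(), vmx_setup_l1d_flush(), vmx_init() and vmx_exit().

#include <stdio.h>

/* Pretend module parameter; its final value is only known after
 * core_init() has probed the "hardware". */
static int enable_ept = 1;

static int core_init(void)
{
	/* Stand-in for kvm_init(): hardware probing may override the
	 * requested parameter value. */
	enable_ept = 0;		/* pretend the CPU lacks EPT */
	return 0;
}

static void core_exit(void)
{
	/* Stand-in for kvm_exit(): undoes core_init(). */
}

static int feature_setup(void)
{
	/*
	 * Stand-in for vmx_setup_l1d_flush(): reads enable_ept, so it
	 * only gives the right answer when it runs after core_init() --
	 * the point of the reordering in the patch.
	 */
	if (!enable_ept) {
		puts("l1tf mitigation: EPT disabled");
		return 0;
	}
	puts("l1tf mitigation: configuring flush");
	return 0;
}

static void mod_exit(void)
{
	/* Stand-in for vmx_exit(): full teardown, also reused on the
	 * init error path, so it cannot be __exit-only. */
	core_exit();
}

static int mod_init(void)
{
	int r = core_init();

	if (r)
		return r;

	r = feature_setup();
	if (r) {
		mod_exit();	/* unwind core_init() too */
		return r;
	}
	return 0;
}

int main(void)
{
	return mod_init();
}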