提交 a7b9020b 编写于 作者: T Thomas Gleixner

x86/l1tf: Handle EPT disabled state proper

If Extended Page Tables (EPT) are disabled or not supported, no L1D
flushing is required. The setup function can just avoid setting up the L1D
flush for the EPT=n case.

Invoke it after the hardware setup has been done and enable_ept has the
correct state and expose the EPT disabled state in the mitigation status as
well.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Jiri Kosina <jkosina@suse.cz>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
Link: https://lkml.kernel.org/r/20180713142322.612160168@linutronix.de
上级 2f055947
......@@ -578,6 +578,7 @@ enum vmx_l1d_flush_state {
VMENTER_L1D_FLUSH_NEVER,
VMENTER_L1D_FLUSH_COND,
VMENTER_L1D_FLUSH_ALWAYS,
VMENTER_L1D_FLUSH_EPT_DISABLED,
};
extern enum vmx_l1d_flush_state l1tf_vmx_mitigation;
......
......@@ -701,6 +701,7 @@ static const char *l1tf_vmx_states[] = {
[VMENTER_L1D_FLUSH_NEVER] = "vulnerable",
[VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
[VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes",
[VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled",
};
static ssize_t l1tf_show_state(char *buf)
......
......@@ -13204,6 +13204,11 @@ static int __init vmx_setup_l1d_flush(void)
if (!boot_cpu_has_bug(X86_BUG_L1TF))
return 0;
if (!enable_ept) {
l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
return 0;
}
l1tf_vmx_mitigation = vmentry_l1d_flush;
if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER)
......@@ -13230,6 +13235,41 @@ static void vmx_cleanup_l1d_flush(void)
l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
}
/*
 * Module teardown. Not marked __exit: it is also invoked from the
 * vmx_init() error path (visible below) when vmx_setup_l1d_flush()
 * fails after kvm_init() has already succeeded.
 */
static void vmx_exit(void)
{
#ifdef CONFIG_KEXEC_CORE
/*
 * Clear the crash-time VMCS list pointer and wait for readers so the
 * kexec crash handler cannot walk memory owned by this module after
 * it is unloaded.
 */
RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
synchronize_rcu();
#endif
kvm_exit();
#if IS_ENABLED(CONFIG_HYPERV)
/* Only touch the VP assist pages if enlightened VMCS was in use. */
if (static_branch_unlikely(&enable_evmcs)) {
int cpu;
struct hv_vp_assist_page *vp_ap;
/*
 * Reset everything to support using non-enlightened VMCS
 * access later (e.g. when we reload the module with
 * enlightened_vmcs=0)
 */
for_each_online_cpu(cpu) {
vp_ap = hv_get_vp_assist_page(cpu);
/* CPU may have no assist page; skip rather than deref NULL. */
if (!vp_ap)
continue;
vp_ap->current_nested_vmcs = 0;
vp_ap->enlighten_vmentry = 0;
}
static_branch_disable(&enable_evmcs);
}
#endif
/*
 * NOTE(review): running the L1D-flush cleanup last, after kvm_exit(),
 * appears intentional (no vCPUs can be entering anymore) — confirm
 * before reordering.
 */
vmx_cleanup_l1d_flush();
}
module_exit(vmx_exit);
static int __init vmx_init(void)
{
int r;
......@@ -13263,14 +13303,17 @@ static int __init vmx_init(void)
}
#endif
r = vmx_setup_l1d_flush();
r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
__alignof__(struct vcpu_vmx), THIS_MODULE);
if (r)
return r;
r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
__alignof__(struct vcpu_vmx), THIS_MODULE);
/*
* Must be called after kvm_init() so enable_ept is properly set up
*/
r = vmx_setup_l1d_flush();
if (r) {
vmx_cleanup_l1d_flush();
vmx_exit();
return r;
}
......@@ -13282,40 +13325,4 @@ static int __init vmx_init(void)
return 0;
}
/*
 * Old module-exit handler (this diff removes it in favour of a
 * non-__exit vmx_exit() that the init error path can also call).
 * __exit: only reachable via module unload, so the section may be
 * discarded for built-in configurations.
 */
static void __exit vmx_exit(void)
{
#ifdef CONFIG_KEXEC_CORE
/*
 * Detach the crash-time VMCS clear list and wait out RCU readers so
 * a kexec crash after unload cannot touch freed module memory.
 */
RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
synchronize_rcu();
#endif
kvm_exit();
#if IS_ENABLED(CONFIG_HYPERV)
/* Undo per-CPU enlightened-VMCS state only if it was enabled. */
if (static_branch_unlikely(&enable_evmcs)) {
int cpu;
struct hv_vp_assist_page *vp_ap;
/*
 * Reset everything to support using non-enlightened VMCS
 * access later (e.g. when we reload the module with
 * enlightened_vmcs=0)
 */
for_each_online_cpu(cpu) {
vp_ap = hv_get_vp_assist_page(cpu);
/* No assist page on this CPU — nothing to reset. */
if (!vp_ap)
continue;
vp_ap->current_nested_vmcs = 0;
vp_ap->enlighten_vmentry = 0;
}
static_branch_disable(&enable_evmcs);
}
#endif
/* L1D flush teardown runs last, after kvm_exit(). */
vmx_cleanup_l1d_flush();
}
module_init(vmx_init)
module_exit(vmx_exit)
module_init(vmx_init);
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册