提交 b6194b94 编写于 作者: S Sean Christopherson 提交者: Paolo Bonzini

KVM: VMX: Configure list of user return MSRs at module init

Configure the list of user return MSRs that are actually supported at
module init instead of reprobing the list of possible MSRs every time a
vCPU is created.  Curating the list on a per-vCPU basis is pointless; KVM
is completely hosed if the set of supported MSRs changes after module init,
or if the set of MSRs differs per physical CPU.

The per-vCPU lists also increase complexity (see __vmx_find_uret_msr()) and
create corner cases that _should_ be impossible, but theoretically exist
in KVM, e.g. advertising RDTSCP to userspace without actually being able to
virtualize RDTSCP if probing MSR_TSC_AUX fails.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210504171734.1434054-9-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
上级 36fa06f9
...@@ -461,7 +461,7 @@ static unsigned long host_idt_base; ...@@ -461,7 +461,7 @@ static unsigned long host_idt_base;
* support this emulation, IA32_STAR must always be included in * support this emulation, IA32_STAR must always be included in
* vmx_uret_msrs_list[], even in i386 builds. * vmx_uret_msrs_list[], even in i386 builds.
*/ */
static const u32 vmx_uret_msrs_list[] = { static u32 vmx_uret_msrs_list[] = {
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
#endif #endif
...@@ -469,6 +469,12 @@ static const u32 vmx_uret_msrs_list[] = { ...@@ -469,6 +469,12 @@ static const u32 vmx_uret_msrs_list[] = {
MSR_IA32_TSX_CTRL, MSR_IA32_TSX_CTRL,
}; };
/*
* Number of user return MSRs that are actually supported in hardware.
* vmx_uret_msrs_list is modified when KVM is loaded to drop unsupported MSRs.
*/
static int vmx_nr_uret_msrs;
#if IS_ENABLED(CONFIG_HYPERV) #if IS_ENABLED(CONFIG_HYPERV)
static bool __read_mostly enlightened_vmcs = true; static bool __read_mostly enlightened_vmcs = true;
module_param(enlightened_vmcs, bool, 0444); module_param(enlightened_vmcs, bool, 0444);
...@@ -700,9 +706,16 @@ static inline int __vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr) ...@@ -700,9 +706,16 @@ static inline int __vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
{ {
int i; int i;
for (i = 0; i < vmx->nr_uret_msrs; ++i) /*
* Note, vmx->guest_uret_msrs is the same size as vmx_uret_msrs_list,
* but is ordered differently. The MSR is matched against the list of
* supported uret MSRs using "slot", but the index that is returned is
* the index into guest_uret_msrs.
*/
for (i = 0; i < vmx_nr_uret_msrs; ++i) {
if (vmx_uret_msrs_list[vmx->guest_uret_msrs[i].slot] == msr) if (vmx_uret_msrs_list[vmx->guest_uret_msrs[i].slot] == msr)
return i; return i;
}
return -1; return -1;
} }
...@@ -6929,18 +6942,10 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu) ...@@ -6929,18 +6942,10 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
goto free_vpid; goto free_vpid;
} }
BUILD_BUG_ON(ARRAY_SIZE(vmx_uret_msrs_list) != MAX_NR_USER_RETURN_MSRS); for (i = 0; i < vmx_nr_uret_msrs; ++i) {
vmx->guest_uret_msrs[i].data = 0;
for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i) {
u32 index = vmx_uret_msrs_list[i];
int j = vmx->nr_uret_msrs;
if (kvm_probe_user_return_msr(index)) switch (vmx_uret_msrs_list[i]) {
continue;
vmx->guest_uret_msrs[j].slot = i;
vmx->guest_uret_msrs[j].data = 0;
switch (index) {
case MSR_IA32_TSX_CTRL: case MSR_IA32_TSX_CTRL:
/* /*
* TSX_CTRL_CPUID_CLEAR is handled in the CPUID * TSX_CTRL_CPUID_CLEAR is handled in the CPUID
...@@ -6954,15 +6959,14 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu) ...@@ -6954,15 +6959,14 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
* host so that TSX remains always disabled. * host so that TSX remains always disabled.
*/ */
if (boot_cpu_has(X86_FEATURE_RTM)) if (boot_cpu_has(X86_FEATURE_RTM))
vmx->guest_uret_msrs[j].mask = ~(u64)TSX_CTRL_CPUID_CLEAR; vmx->guest_uret_msrs[i].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
else else
vmx->guest_uret_msrs[j].mask = 0; vmx->guest_uret_msrs[i].mask = 0;
break; break;
default: default:
vmx->guest_uret_msrs[j].mask = -1ull; vmx->guest_uret_msrs[i].mask = -1ull;
break; break;
} }
++vmx->nr_uret_msrs;
} }
err = alloc_loaded_vmcs(&vmx->vmcs01); err = alloc_loaded_vmcs(&vmx->vmcs01);
...@@ -7821,17 +7825,34 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { ...@@ -7821,17 +7825,34 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector, .vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
}; };
/*
 * Probe the candidate user return MSRs once at module init and compact
 * vmx_uret_msrs_list[] down to only the MSRs the host actually supports,
 * registering each supported MSR with the common x86 user-return machinery.
 * After this runs, entries [0, vmx_nr_uret_msrs) of vmx_uret_msrs_list are
 * valid and their index doubles as the kvm_define_user_return_msr() slot.
 */
static __init void vmx_setup_user_return_msrs(void)
{
u32 msr;
int i;
/* guest_uret_msrs[] (see vmx.h) must be able to hold every candidate. */
BUILD_BUG_ON(ARRAY_SIZE(vmx_uret_msrs_list) != MAX_NR_USER_RETURN_MSRS);
for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i) {
msr = vmx_uret_msrs_list[i];
/* Skip MSRs the host doesn't support; non-zero means probe failed. */
if (kvm_probe_user_return_msr(msr))
continue;
kvm_define_user_return_msr(vmx_nr_uret_msrs, msr);
/*
 * Compact in place: supported MSRs are packed at the front of the
 * list, so "slot" lookups never hit an unsupported entry.  This is
 * safe because vmx_nr_uret_msrs++ never exceeds i.
 */
vmx_uret_msrs_list[vmx_nr_uret_msrs++] = msr;
}
}
static __init int hardware_setup(void) static __init int hardware_setup(void)
{ {
unsigned long host_bndcfgs; unsigned long host_bndcfgs;
struct desc_ptr dt; struct desc_ptr dt;
int r, i, ept_lpage_level; int r, ept_lpage_level;
store_idt(&dt); store_idt(&dt);
host_idt_base = dt.address; host_idt_base = dt.address;
for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i) vmx_setup_user_return_msrs();
kvm_define_user_return_msr(i, vmx_uret_msrs_list[i]);
if (setup_vmcs_config(&vmcs_config, &vmx_capability) < 0) if (setup_vmcs_config(&vmcs_config, &vmx_capability) < 0)
return -EIO; return -EIO;
......
...@@ -245,8 +245,16 @@ struct vcpu_vmx { ...@@ -245,8 +245,16 @@ struct vcpu_vmx {
u32 idt_vectoring_info; u32 idt_vectoring_info;
ulong rflags; ulong rflags;
/*
* User return MSRs are always emulated when enabled in the guest, but
* only loaded into hardware when necessary, e.g. SYSCALL #UDs outside
* of 64-bit mode or if EFER.SCE=1, thus the SYSCALL MSRs don't need to
* be loaded into hardware if those conditions aren't met.
* nr_active_uret_msrs tracks the number of MSRs that need to be loaded
* into hardware when running the guest. guest_uret_msrs[] is resorted
* whenever the number of "active" uret MSRs is modified.
*/
struct vmx_uret_msr guest_uret_msrs[MAX_NR_USER_RETURN_MSRS]; struct vmx_uret_msr guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
int nr_uret_msrs;
int nr_active_uret_msrs; int nr_active_uret_msrs;
bool guest_uret_msrs_loaded; bool guest_uret_msrs_loaded;
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册