Commit 1f3a3e46 authored by Lan Tianyu, committed by Paolo Bonzini

KVM/VMX: Add hv tlb range flush support

This patch registers a tlb_remote_flush_with_range callback backed by the
Hyper-V tlb range flush interface.
Signed-off-by: Lan Tianyu <Tianyu.Lan@microsoft.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent cc4edae4
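
For context, the new hook is meant to be driven by KVM's MMU when it wants to flush only a range of guest frames rather than the whole guest address space. The sketch below is not part of this patch: the caller name kvm_flush_remote_tlbs_with_range_sketch is made up, and it only illustrates how a struct kvm_tlb_range (assumed here to carry start_gfn and pages, as the diff uses) would be built and handed to the tlb_remote_flush_with_range callback registered by this commit, with the usual -ENOTSUPP fallback when no backend provides range flushing.

/*
 * Illustrative caller only (not from this patch): build a kvm_tlb_range
 * and dispatch it to the range-flush callback when the backend provides
 * one; otherwise report that range flushing is unsupported so the caller
 * can fall back to a full remote TLB flush.
 */
static int kvm_flush_remote_tlbs_with_range_sketch(struct kvm *kvm,
		u64 start_gfn, u64 pages)
{
	struct kvm_tlb_range range = {
		.start_gfn = start_gfn,
		.pages = pages,
	};

	if (kvm_x86_ops->tlb_remote_flush_with_range)
		return kvm_x86_ops->tlb_remote_flush_with_range(kvm, &range);

	return -ENOTSUPP;
}
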
@@ -421,7 +421,34 @@ static void check_ept_pointer_match(struct kvm *kvm)
 	to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH;
 }
 
-static int vmx_hv_remote_flush_tlb(struct kvm *kvm)
+int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
+		void *data)
+{
+	struct kvm_tlb_range *range = data;
+
+	return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn,
+			range->pages);
+}
+
+static inline int __hv_remote_flush_tlb_with_range(struct kvm *kvm,
+		struct kvm_vcpu *vcpu, struct kvm_tlb_range *range)
+{
+	u64 ept_pointer = to_vmx(vcpu)->ept_pointer;
+
+	/*
+	 * FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE hypercall needs address
+	 * of the base of EPT PML4 table, strip off EPT configuration
+	 * information.
+	 */
+	if (range)
+		return hyperv_flush_guest_mapping_range(ept_pointer & PAGE_MASK,
+				kvm_fill_hv_flush_list_func, (void *)range);
+	else
+		return hyperv_flush_guest_mapping(ept_pointer & PAGE_MASK);
+}
+
+static int hv_remote_flush_tlb_with_range(struct kvm *kvm,
+		struct kvm_tlb_range *range)
 {
 	struct kvm_vcpu *vcpu;
 	int ret = -ENOTSUPP, i;
@@ -431,29 +458,26 @@ static int vmx_hv_remote_flush_tlb(struct kvm *kvm)
 	if (to_kvm_vmx(kvm)->ept_pointers_match == EPT_POINTERS_CHECK)
 		check_ept_pointer_match(kvm);
 
-	/*
-	 * FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE hypercall needs the address of the
-	 * base of EPT PML4 table, strip off EPT configuration information.
-	 * If ept_pointer is invalid pointer, bypass the flush request.
-	 */
 	if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) {
 		kvm_for_each_vcpu(i, vcpu, kvm) {
-			u64 ept_pointer = to_vmx(vcpu)->ept_pointer;
-
-			if (!VALID_PAGE(ept_pointer))
-				continue;
-
-			ret |= hyperv_flush_guest_mapping(
-				ept_pointer & PAGE_MASK);
+			/* If ept_pointer is invalid pointer, bypass flush request. */
+			if (VALID_PAGE(to_vmx(vcpu)->ept_pointer))
+				ret |= __hv_remote_flush_tlb_with_range(
+					kvm, vcpu, range);
 		}
 	} else {
-		ret = hyperv_flush_guest_mapping(
-				to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer & PAGE_MASK);
+		ret = __hv_remote_flush_tlb_with_range(kvm,
+				kvm_get_vcpu(kvm, 0), range);
	}
 
 	spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
 	return ret;
 }
+
+static int hv_remote_flush_tlb(struct kvm *kvm)
+{
+	return hv_remote_flush_tlb_with_range(kvm, NULL);
+}
 #endif /* IS_ENABLED(CONFIG_HYPERV) */
 
 /*
@@ -7554,8 +7578,11 @@ static __init int hardware_setup(void)
 
 #if IS_ENABLED(CONFIG_HYPERV)
 	if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH
-	    && enable_ept)
-		kvm_x86_ops->tlb_remote_flush = vmx_hv_remote_flush_tlb;
+	    && enable_ept) {
+		kvm_x86_ops->tlb_remote_flush = hv_remote_flush_tlb;
+		kvm_x86_ops->tlb_remote_flush_with_range =
+				hv_remote_flush_tlb_with_range;
+	}
 #endif
 
 	if (!cpu_has_vmx_ple()) {
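
On the range path, kvm_fill_hv_flush_list_func() hands the kvm_tlb_range to hyperv_fill_flush_guest_mapping_list(), whose job is to pack the GFN range into the bounded GPA list of the Hyper-V flush-list hypercall. The snippet below only sketches that packing idea with invented names (fake_flush_entry, FAKE_MAX_ENTRIES, FAKE_PAGES_PER_ENTRY are not kernel identifiers, and the limits are arbitrary); the real layout and limits come from the Hyper-V TLFS definitions used by the helper.

/*
 * Illustration only: invented types and limits standing in for the real
 * Hyper-V flush-list layout.  Shows how a (start_gfn, pages) range can be
 * chunked into bounded list entries by a fill callback of this shape.
 * Assumes kernel types/helpers (u64, min_t, ENOSPC) are available.
 */
#define FAKE_MAX_ENTRIES	256	/* entries one hypercall can carry */
#define FAKE_PAGES_PER_ENTRY	2048	/* pages one entry can describe */

struct fake_flush_entry {
	u64 base_gfn;		/* first GFN covered by this entry */
	u64 extra_pages;	/* pages covered beyond base_gfn */
};

static int fake_fill_flush_list(struct fake_flush_entry *list,
				u64 start_gfn, u64 pages)
{
	int n = 0;

	while (pages) {
		u64 chunk = min_t(u64, pages, FAKE_PAGES_PER_ENTRY);

		/* Range too large for one hypercall's list. */
		if (n >= FAKE_MAX_ENTRIES)
			return -ENOSPC;

		list[n].base_gfn = start_gfn;
		list[n].extra_pages = chunk - 1;

		start_gfn += chunk;
		pages -= chunk;
		n++;
	}

	return n;	/* number of list entries populated */
}

A negative return from the fill step presumably lets the caller degrade to a full guest-address-space flush, which is exactly what __hv_remote_flush_tlb_with_range() does when no range is supplied.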