Commit 5e2f30b7 authored by David Hildenbrand, committed by Paolo Bonzini

KVM: nVMX: get rid of nested_get_page()

nested_get_page() just sounds confusing. All we want is a page from G1.
This is even unrelated to nested.

Let's introduce kvm_vcpu_gpa_to_page() so we don't get too lengthy
lines.
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
[Squash pasto fix from Wanpeng Li. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent commit: 90a2db6d
...@@ -891,14 +891,6 @@ static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu) ...@@ -891,14 +891,6 @@ static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
return to_vmx(vcpu)->nested.cached_vmcs12; return to_vmx(vcpu)->nested.cached_vmcs12;
} }
/*
 * Translate a guest physical address into a referenced struct page.
 *
 * @vcpu: the vCPU whose memory slots are used for the lookup.
 * @addr: guest physical address (any offset within the page is ignored).
 *
 * Returns the pinned struct page on success, or NULL when the
 * gfn-to-page lookup produced an error page. The caller owns the
 * reference and must release it (e.g. via nested_release_page()).
 */
static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr)
{
	struct page *pg = kvm_vcpu_gfn_to_page(vcpu, addr >> PAGE_SHIFT);

	return is_error_page(pg) ? NULL : pg;
}
static void nested_release_page(struct page *page) static void nested_release_page(struct page *page)
{ {
...@@ -7156,8 +7148,8 @@ static int handle_vmon(struct kvm_vcpu *vcpu) ...@@ -7156,8 +7148,8 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
return kvm_skip_emulated_instruction(vcpu); return kvm_skip_emulated_instruction(vcpu);
} }
page = nested_get_page(vcpu, vmptr); page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
if (page == NULL) { if (is_error_page(page)) {
nested_vmx_failInvalid(vcpu); nested_vmx_failInvalid(vcpu);
return kvm_skip_emulated_instruction(vcpu); return kvm_skip_emulated_instruction(vcpu);
} }
...@@ -7625,8 +7617,8 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu) ...@@ -7625,8 +7617,8 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
if (vmx->nested.current_vmptr != vmptr) { if (vmx->nested.current_vmptr != vmptr) {
struct vmcs12 *new_vmcs12; struct vmcs12 *new_vmcs12;
struct page *page; struct page *page;
page = nested_get_page(vcpu, vmptr); page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
if (page == NULL) { if (is_error_page(page)) {
nested_vmx_failInvalid(vcpu); nested_vmx_failInvalid(vcpu);
return kvm_skip_emulated_instruction(vcpu); return kvm_skip_emulated_instruction(vcpu);
} }
...@@ -9632,6 +9624,7 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu, ...@@ -9632,6 +9624,7 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12) struct vmcs12 *vmcs12)
{ {
struct vcpu_vmx *vmx = to_vmx(vcpu); struct vcpu_vmx *vmx = to_vmx(vcpu);
struct page *page;
u64 hpa; u64 hpa;
if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
...@@ -9641,17 +9634,19 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu, ...@@ -9641,17 +9634,19 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
* physical address remains valid. We keep a reference * physical address remains valid. We keep a reference
* to it so we can release it later. * to it so we can release it later.
*/ */
if (vmx->nested.apic_access_page) /* shouldn't happen */ if (vmx->nested.apic_access_page) { /* shouldn't happen */
nested_release_page(vmx->nested.apic_access_page); nested_release_page(vmx->nested.apic_access_page);
vmx->nested.apic_access_page = vmx->nested.apic_access_page = NULL;
nested_get_page(vcpu, vmcs12->apic_access_addr); }
page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
/* /*
* If translation failed, no matter: This feature asks * If translation failed, no matter: This feature asks
* to exit when accessing the given address, and if it * to exit when accessing the given address, and if it
* can never be accessed, this feature won't do * can never be accessed, this feature won't do
* anything anyway. * anything anyway.
*/ */
if (vmx->nested.apic_access_page) { if (!is_error_page(page)) {
vmx->nested.apic_access_page = page;
hpa = page_to_phys(vmx->nested.apic_access_page); hpa = page_to_phys(vmx->nested.apic_access_page);
vmcs_write64(APIC_ACCESS_ADDR, hpa); vmcs_write64(APIC_ACCESS_ADDR, hpa);
} else { } else {
...@@ -9666,10 +9661,11 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu, ...@@ -9666,10 +9661,11 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
} }
if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
if (vmx->nested.virtual_apic_page) /* shouldn't happen */ if (vmx->nested.virtual_apic_page) { /* shouldn't happen */
nested_release_page(vmx->nested.virtual_apic_page); nested_release_page(vmx->nested.virtual_apic_page);
vmx->nested.virtual_apic_page = vmx->nested.virtual_apic_page = NULL;
nested_get_page(vcpu, vmcs12->virtual_apic_page_addr); }
page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->virtual_apic_page_addr);
/* /*
* If translation failed, VM entry will fail because * If translation failed, VM entry will fail because
...@@ -9684,7 +9680,8 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu, ...@@ -9684,7 +9680,8 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
* control. But such a configuration is useless, so * control. But such a configuration is useless, so
* let's keep the code simple. * let's keep the code simple.
*/ */
if (vmx->nested.virtual_apic_page) { if (!is_error_page(page)) {
vmx->nested.virtual_apic_page = page;
hpa = page_to_phys(vmx->nested.virtual_apic_page); hpa = page_to_phys(vmx->nested.virtual_apic_page);
vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, hpa); vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, hpa);
} }
...@@ -9694,15 +9691,13 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu, ...@@ -9694,15 +9691,13 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
if (vmx->nested.pi_desc_page) { /* shouldn't happen */ if (vmx->nested.pi_desc_page) { /* shouldn't happen */
kunmap(vmx->nested.pi_desc_page); kunmap(vmx->nested.pi_desc_page);
nested_release_page(vmx->nested.pi_desc_page); nested_release_page(vmx->nested.pi_desc_page);
vmx->nested.pi_desc_page = NULL;
} }
vmx->nested.pi_desc_page = page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->posted_intr_desc_addr);
nested_get_page(vcpu, vmcs12->posted_intr_desc_addr); if (is_error_page(page))
vmx->nested.pi_desc =
(struct pi_desc *)kmap(vmx->nested.pi_desc_page);
if (!vmx->nested.pi_desc) {
nested_release_page_clean(vmx->nested.pi_desc_page);
return; return;
} vmx->nested.pi_desc_page = page;
vmx->nested.pi_desc = kmap(vmx->nested.pi_desc_page);
vmx->nested.pi_desc = vmx->nested.pi_desc =
(struct pi_desc *)((void *)vmx->nested.pi_desc + (struct pi_desc *)((void *)vmx->nested.pi_desc +
(unsigned long)(vmcs12->posted_intr_desc_addr & (unsigned long)(vmcs12->posted_intr_desc_addr &
...@@ -9784,8 +9779,8 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu, ...@@ -9784,8 +9779,8 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
if (!nested_cpu_has_virt_x2apic_mode(vmcs12)) if (!nested_cpu_has_virt_x2apic_mode(vmcs12))
return false; return false;
page = nested_get_page(vcpu, vmcs12->msr_bitmap); page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->msr_bitmap);
if (!page) if (is_error_page(page))
return false; return false;
msr_bitmap_l1 = (unsigned long *)kmap(page); msr_bitmap_l1 = (unsigned long *)kmap(page);
...@@ -11392,8 +11387,8 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu) ...@@ -11392,8 +11387,8 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull; gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;
page = nested_get_page(vcpu, vmcs12->pml_address); page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->pml_address);
if (!page) if (is_error_page(page))
return 0; return 0;
pml_address = kmap(page); pml_address = kmap(page);
......
...@@ -985,6 +985,12 @@ static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn) ...@@ -985,6 +985,12 @@ static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
return (hpa_t)pfn << PAGE_SHIFT; return (hpa_t)pfn << PAGE_SHIFT;
} }
/*
 * kvm_vcpu_gpa_to_page - resolve a guest physical address to its page.
 *
 * @vcpu: vCPU providing the memslot context for the translation.
 * @gpa:  guest physical address; only its frame number is used.
 *
 * Thin convenience wrapper: converts the gpa to a gfn and delegates to
 * kvm_vcpu_gfn_to_page(). May return an error page — callers must check
 * the result with is_error_page().
 */
static inline struct page *kvm_vcpu_gpa_to_page(struct kvm_vcpu *vcpu,
						gpa_t gpa)
{
	gfn_t gfn = gpa_to_gfn(gpa);

	return kvm_vcpu_gfn_to_page(vcpu, gfn);
}
static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa) static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
{ {
unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa)); unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册