提交 14dfe855 编写于 作者: Joerg Roedel 提交者: Avi Kivity

KVM: X86: Introduce pointer to mmu context used for gva_to_gpa

This patch introduces the walk_mmu pointer which points to
the mmu-context currently used for gva_to_gpa translations.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
上级 c30a358d
...@@ -286,8 +286,21 @@ struct kvm_vcpu_arch { ...@@ -286,8 +286,21 @@ struct kvm_vcpu_arch {
u64 ia32_misc_enable_msr; u64 ia32_misc_enable_msr;
bool tpr_access_reporting; bool tpr_access_reporting;
/*
* Paging state of the vcpu
*
* If the vcpu runs in guest mode with two level paging this still saves
* the paging mode of the l1 guest. This context is always used to
* handle faults.
*/
struct kvm_mmu mmu; struct kvm_mmu mmu;
/*
* Pointer to the mmu context currently used for
* gva_to_gpa translations.
*/
struct kvm_mmu *walk_mmu;
/* /*
* This struct is filled with the necessary information to propagate a * This struct is filled with the necessary information to propagate a
* page fault into the guest * page fault into the guest
......
...@@ -2708,7 +2708,7 @@ static int paging32E_init_context(struct kvm_vcpu *vcpu, ...@@ -2708,7 +2708,7 @@ static int paging32E_init_context(struct kvm_vcpu *vcpu,
static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{ {
struct kvm_mmu *context = &vcpu->arch.mmu; struct kvm_mmu *context = vcpu->arch.walk_mmu;
context->new_cr3 = nonpaging_new_cr3; context->new_cr3 = nonpaging_new_cr3;
context->page_fault = tdp_page_fault; context->page_fault = tdp_page_fault;
...@@ -2767,11 +2767,11 @@ EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu); ...@@ -2767,11 +2767,11 @@ EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
static int init_kvm_softmmu(struct kvm_vcpu *vcpu) static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
{ {
int r = kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu); int r = kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu);
vcpu->arch.mmu.set_cr3 = kvm_x86_ops->set_cr3; vcpu->arch.walk_mmu->set_cr3 = kvm_x86_ops->set_cr3;
vcpu->arch.mmu.get_cr3 = get_cr3; vcpu->arch.walk_mmu->get_cr3 = get_cr3;
vcpu->arch.mmu.inject_page_fault = kvm_inject_page_fault; vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
return r; return r;
} }
......
...@@ -3456,27 +3456,27 @@ static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access) ...@@ -3456,27 +3456,27 @@ static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error) gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
{ {
u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error); return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
} }
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error) gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
{ {
u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
access |= PFERR_FETCH_MASK; access |= PFERR_FETCH_MASK;
return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error); return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
} }
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error) gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
{ {
u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
access |= PFERR_WRITE_MASK; access |= PFERR_WRITE_MASK;
return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error); return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
} }
/* uses this to access any guest's mapped memory without checking CPL */ /* uses this to access any guest's mapped memory without checking CPL */
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error) gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
{ {
return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, 0, error); return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, error);
} }
static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
...@@ -3487,7 +3487,8 @@ static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, ...@@ -3487,7 +3487,8 @@ static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
int r = X86EMUL_CONTINUE; int r = X86EMUL_CONTINUE;
while (bytes) { while (bytes) {
gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr, access, error); gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
error);
unsigned offset = addr & (PAGE_SIZE-1); unsigned offset = addr & (PAGE_SIZE-1);
unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
int ret; int ret;
...@@ -3542,8 +3543,9 @@ static int kvm_write_guest_virt_system(gva_t addr, void *val, ...@@ -3542,8 +3543,9 @@ static int kvm_write_guest_virt_system(gva_t addr, void *val,
int r = X86EMUL_CONTINUE; int r = X86EMUL_CONTINUE;
while (bytes) { while (bytes) {
gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr, gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
PFERR_WRITE_MASK, error); PFERR_WRITE_MASK,
error);
unsigned offset = addr & (PAGE_SIZE-1); unsigned offset = addr & (PAGE_SIZE-1);
unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
int ret; int ret;
...@@ -5663,6 +5665,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) ...@@ -5663,6 +5665,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
kvm = vcpu->kvm; kvm = vcpu->kvm;
vcpu->arch.emulate_ctxt.ops = &emulate_ops; vcpu->arch.emulate_ctxt.ops = &emulate_ops;
vcpu->arch.walk_mmu = &vcpu->arch.mmu;
vcpu->arch.mmu.root_hpa = INVALID_PAGE; vcpu->arch.mmu.root_hpa = INVALID_PAGE;
vcpu->arch.mmu.translate_gpa = translate_gpa; vcpu->arch.mmu.translate_gpa = translate_gpa;
if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu)) if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册