Commit ab9ae313 authored by Avi Kivity

KVM: Push struct x86_exception into the various gva_to_gpa variants

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Parent: 35d3d4a1
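For orientation before the hunks: the diff below only ever touches three fields of the new descriptor, so a minimal sketch of what the patch assumes looks like the following. This is inferred from the diff itself; the authoritative definition of struct x86_exception comes from the parent commit 35d3d4a1 and may carry additional fields.

/*
 * Minimal sketch of struct x86_exception, inferred from the three
 * fields this diff touches. Not the authoritative definition: that
 * is introduced by the parent commit (35d3d4a1) and may contain
 * more fields than shown here.
 */
struct x86_exception {
        u8 vector;              /* exception number, e.g. PF_VECTOR */
        bool error_code_valid;  /* whether an error code is pushed */
        u16 error_code;         /* page-fault error code from the walker */
};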
@@ -245,7 +245,7 @@ struct kvm_mmu {
 	void (*inject_page_fault)(struct kvm_vcpu *vcpu);
 	void (*free)(struct kvm_vcpu *vcpu);
 	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
-			    u32 *error);
+			    struct x86_exception *exception);
 	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
 	void (*prefetch_page)(struct kvm_vcpu *vcpu,
 			      struct kvm_mmu_page *page);
@@ -708,10 +708,14 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
-gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
-gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
-gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
-gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
+gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
+			      struct x86_exception *exception);
+gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
+			       struct x86_exception *exception);
+gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
+			       struct x86_exception *exception);
+gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
+				struct x86_exception *exception);
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
...
@@ -2567,18 +2567,19 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 }
 
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
-				  u32 access, u32 *error)
+				  u32 access, struct x86_exception *exception)
 {
-	if (error)
-		*error = 0;
+	if (exception)
+		exception->error_code = 0;
 	return vaddr;
 }
 
 static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
-					 u32 access, u32 *error)
+					 u32 access,
+					 struct x86_exception *exception)
 {
-	if (error)
-		*error = 0;
+	if (exception)
+		exception->error_code = 0;
 	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access);
 }
...
@@ -677,7 +677,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 }
 
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
-			       u32 *error)
+			       struct x86_exception *exception)
 {
 	struct guest_walker walker;
 	gpa_t gpa = UNMAPPED_GVA;
@@ -688,14 +688,18 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
 	if (r) {
 		gpa = gfn_to_gpa(walker.gfn);
 		gpa |= vaddr & ~PAGE_MASK;
-	} else if (error)
-		*error = walker.error_code;
+	} else if (exception) {
+		exception->vector = PF_VECTOR;
+		exception->error_code_valid = true;
+		exception->error_code = walker.error_code;
+	}
 
 	return gpa;
 }
 
 static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
-				      u32 access, u32 *error)
+				      u32 access,
+				      struct x86_exception *exception)
 {
 	struct guest_walker walker;
 	gpa_t gpa = UNMAPPED_GVA;
@@ -706,8 +710,11 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
 	if (r) {
 		gpa = gfn_to_gpa(walker.gfn);
 		gpa |= vaddr & ~PAGE_MASK;
-	} else if (error)
-		*error = walker.error_code;
+	} else if (exception) {
+		exception->vector = PF_VECTOR;
+		exception->error_code_valid = true;
+		exception->error_code = walker.error_code;
+	}
 
 	return gpa;
 }
...
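Note that both FNAME() variants above now fill the exception identically on a failed walk. Purely for illustration, that shared pattern could be written as the helper below; no such helper exists in this patch, each variant open-codes the three assignments.

/*
 * Illustration only: the fill pattern shared by FNAME(gva_to_gpa)
 * and FNAME(gva_to_gpa_nested). This helper is hypothetical and is
 * not part of the patch.
 */
static void fill_walk_fault(struct x86_exception *exception,
                            struct guest_walker *walker)
{
        exception->vector = PF_VECTOR;
        exception->error_code_valid = true;
        exception->error_code = walker->error_code;
}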
@@ -3603,51 +3603,47 @@ static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
 static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
 {
 	gpa_t t_gpa;
-	u32 error;
+	struct x86_exception exception;
 
 	BUG_ON(!mmu_is_nested(vcpu));
 
 	/* NPT walks are always user-walks */
 	access |= PFERR_USER_MASK;
-	t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &error);
+	t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception);
 	if (t_gpa == UNMAPPED_GVA)
 		vcpu->arch.fault.nested = true;
 
 	return t_gpa;
 }
 
-gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
+			      struct x86_exception *exception)
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
-	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
+	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
 }
 
-gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
+			       struct x86_exception *exception)
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
 	access |= PFERR_FETCH_MASK;
-	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
+	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
 }
 
-gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
+			       struct x86_exception *exception)
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
 	access |= PFERR_WRITE_MASK;
-	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
+	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
 }
 
 /* uses this to access any guest's mapped memory without checking CPL */
-gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
-{
-	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, error);
-}
-
-static int make_page_fault(struct x86_exception *exception, u32 error)
+gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
+				struct x86_exception *exception)
 {
-	exception->vector = PF_VECTOR;
-	exception->error_code_valid = true;
-	exception->error_code = error;
-	return X86EMUL_PROPAGATE_FAULT;
+	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
 }
 
 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
@@ -3656,17 +3652,16 @@ static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
 {
 	void *data = val;
 	int r = X86EMUL_CONTINUE;
-	u32 error;
 
 	while (bytes) {
 		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
-							    &error);
+							    exception);
 		unsigned offset = addr & (PAGE_SIZE-1);
 		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
 		int ret;
 
 		if (gpa == UNMAPPED_GVA)
-			return make_page_fault(exception, error);
+			return X86EMUL_PROPAGATE_FAULT;
 		ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
 		if (ret < 0) {
 			r = X86EMUL_IO_NEEDED;
@@ -3715,18 +3710,17 @@ static int kvm_write_guest_virt_system(gva_t addr, void *val,
 {
 	void *data = val;
 	int r = X86EMUL_CONTINUE;
-	u32 error;
 
 	while (bytes) {
 		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
 							    PFERR_WRITE_MASK,
-							    &error);
+							    exception);
 		unsigned offset = addr & (PAGE_SIZE-1);
 		unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
 		int ret;
 
 		if (gpa == UNMAPPED_GVA)
-			return make_page_fault(exception, error);
+			return X86EMUL_PROPAGATE_FAULT;
 		ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
 		if (ret < 0) {
 			r = X86EMUL_IO_NEEDED;
@@ -3748,7 +3742,6 @@ static int emulator_read_emulated(unsigned long addr,
 				  struct kvm_vcpu *vcpu)
 {
 	gpa_t gpa;
-	u32 error_code;
 
 	if (vcpu->mmio_read_completed) {
 		memcpy(val, vcpu->mmio_data, bytes);
@@ -3758,10 +3751,10 @@ static int emulator_read_emulated(unsigned long addr,
 		return X86EMUL_CONTINUE;
 	}
 
-	gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, &error_code);
+	gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, exception);
 
 	if (gpa == UNMAPPED_GVA)
-		return make_page_fault(exception, error_code);
+		return X86EMUL_PROPAGATE_FAULT;
 
 	/* For APIC access vmexit */
 	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
@@ -3810,12 +3803,11 @@ static int emulator_write_emulated_onepage(unsigned long addr,
 					   struct kvm_vcpu *vcpu)
 {
 	gpa_t gpa;
-	u32 error_code;
 
-	gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, &error_code);
+	gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, exception);
 
 	if (gpa == UNMAPPED_GVA)
-		return make_page_fault(exception, error_code);
+		return X86EMUL_PROPAGATE_FAULT;
 
 	/* For APIC access vmexit */
 	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
...
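The net effect on callers, sketched below with a hypothetical function (example_read is illustrative, not part of the patch): the raw u32 error code and the make_page_fault() helper are gone, because a failed translation now leaves PF_VECTOR and the walker's error code in *exception, and the caller only has to signal propagation.

/*
 * Hypothetical caller sketch; example_read is not part of this patch.
 * On a failed translation, *exception has already been filled by the
 * gva_to_gpa implementation, so the caller just propagates the fault.
 */
static int example_read(struct kvm_vcpu *vcpu, gva_t gva,
                        struct x86_exception *exception)
{
        gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, exception);

        if (gpa == UNMAPPED_GVA)
                return X86EMUL_PROPAGATE_FAULT;

        return X86EMUL_CONTINUE;        /* translation succeeded */
}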