Commit 49a9b07e authored by Avi Kivity

KVM: Fix mov cr0 #GP at wrong instruction

On Intel, we call skip_emulated_instruction() even if we injected a #GP,
resulting in the #GP pointing at the wrong address.

Fix by injecting the exception and skipping the instruction at the same place,
so we can do just one or the other.
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Parent 2acf923e
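For illustration only, a small user-space toy model of the control flow this patch fixes. Everything in it (the toy_vcpu struct, the toy_* helpers, the 3-byte instruction length, and the "only zero is valid" rule) is hypothetical and merely stands in for the real KVM code shown in the diff below; it demonstrates why queuing a #GP and then still advancing RIP makes the fault point past the offending mov, and why the fix performs exactly one of the two actions.

/* Hypothetical user-space model; not kernel code. */
#include <stdio.h>

struct toy_vcpu {
        unsigned long rip;      /* guest instruction pointer */
        int pending_gp;         /* a #GP has been queued for injection */
};

/* Stand-in for kvm_set_cr0(): nonzero return means the write must #GP. */
static int toy_set_cr0(unsigned long val)
{
        return val != 0;        /* toy rule: only 0 counts as a valid value */
}

static void toy_inject_gp(struct toy_vcpu *v) { v->pending_gp = 1; }
static void toy_skip_insn(struct toy_vcpu *v) { v->rip += 3; }  /* past the mov */

/* The pattern this patch introduces: inject the fault OR skip, never both. */
static void toy_complete_insn_gp(struct toy_vcpu *v, int err)
{
        if (err)
                toy_inject_gp(v);
        else
                toy_skip_insn(v);
}

int main(void)
{
        struct toy_vcpu v = { .rip = 0x1000, .pending_gp = 0 };

        toy_complete_insn_gp(&v, toy_set_cr0(1));       /* an "invalid" CR0 write */
        /*
         * rip stays at 0x1000, so the injected #GP points at the mov itself.
         * The pre-patch Intel path would also have skipped the instruction,
         * leaving rip at 0x1003 with the fault pointing at the wrong address.
         */
        printf("rip=%#lx pending_gp=%d\n", v.rip, v.pending_gp);
        return 0;
}

Built with any C compiler, this prints rip=0x1000 pending_gp=1: the instruction pointer is left on the faulting mov, which is the behaviour the patch establishes for the Intel CR-access handler.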
arch/x86/include/asm/kvm_host.h
@@ -597,7 +597,7 @@ int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
                     bool has_error_code, u32 error_code);
 
-void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
+int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
 void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
arch/x86/kvm/svm.c
@@ -807,7 +807,7 @@ static void init_vmcb(struct vcpu_svm *svm)
         * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
         */
        svm->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
-       kvm_set_cr0(&svm->vcpu, svm->vcpu.arch.cr0);
+       (void)kvm_set_cr0(&svm->vcpu, svm->vcpu.arch.cr0);
        save->cr4 = X86_CR4_PAE;
        /* rdx = ?? */
arch/x86/kvm/vmx.c
@@ -3157,11 +3157,20 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
        hypercall[2] = 0xc1;
 }
 
+static void complete_insn_gp(struct kvm_vcpu *vcpu, int err)
+{
+       if (err)
+               kvm_inject_gp(vcpu, 0);
+       else
+               skip_emulated_instruction(vcpu);
+}
+
 static int handle_cr(struct kvm_vcpu *vcpu)
 {
        unsigned long exit_qualification, val;
        int cr;
        int reg;
+       int err;
 
        exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
        cr = exit_qualification & 15;
@@ -3172,8 +3181,8 @@ static int handle_cr(struct kvm_vcpu *vcpu)
                trace_kvm_cr_write(cr, val);
                switch (cr) {
                case 0:
-                       kvm_set_cr0(vcpu, val);
-                       skip_emulated_instruction(vcpu);
+                       err = kvm_set_cr0(vcpu, val);
+                       complete_insn_gp(vcpu, err);
                        return 1;
                case 3:
                        kvm_set_cr3(vcpu, val);
arch/x86/kvm/x86.c
@@ -425,7 +425,7 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
        return changed;
 }
 
-static int __kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
        unsigned long old_cr0 = kvm_read_cr0(vcpu);
        unsigned long update_bits = X86_CR0_PG | X86_CR0_WP |
@@ -468,17 +468,11 @@ static int __kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
        kvm_mmu_reset_context(vcpu);
        return 0;
 }
-
-void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
-{
-       if (__kvm_set_cr0(vcpu, cr0))
-               kvm_inject_gp(vcpu, 0);
-}
 EXPORT_SYMBOL_GPL(kvm_set_cr0);
 
 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 {
-       kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
+       (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
 }
 EXPORT_SYMBOL_GPL(kvm_lmsw);
@@ -3732,7 +3726,7 @@ static int emulator_set_cr(int cr, unsigned long val, struct kvm_vcpu *vcpu)
        switch (cr) {
        case 0:
-               res = __kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
+               res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
                break;
        case 2:
                vcpu->arch.cr2 = val;