Commit eea1cff9 authored by Andre Przywara, committed by Avi Kivity

KVM: x86: fix CR8 handling

The handling of CR8 writes in KVM is currently somewhat cumbersome.
This patch makes it look like the other CR register handlers
and fixes a possible issue in VMX, where the RIP would be incremented
despite an injected #GP.
Signed-off-by: Andre Przywara <andre.przywara@amd.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Parent a63512a4
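For context, the patch moves VMX to the pattern the other CR handlers already use: the kvm_set_crN() helper returns 0 on success and non-zero on failure, and the caller either skips the instruction (advancing RIP) or injects #GP, never both. The stand-alone C sketch below models that control flow in user space under stated assumptions; the vcpu struct, the rip/pending_gp fields, the helper names and the instruction length are illustrative stand-ins, not kernel code.

#include <stdio.h>

#define CR8_RESERVED_BITS (~0xfUL)	/* assumption: only the low 4 TPR bits are writable */

struct vcpu {
	unsigned long cr8;
	unsigned long rip;
	int pending_gp;			/* models an injected #GP */
};

/* Models the new kvm_set_cr8(): reject writes that touch reserved bits. */
static int set_cr8(struct vcpu *v, unsigned long val)
{
	if (val & CR8_RESERVED_BITS)
		return 1;
	v->cr8 = val;
	return 0;
}

/* Models complete_insn_gp(): advance RIP only when the write succeeded. */
static void finish_insn(struct vcpu *v, int err)
{
	if (err)
		v->pending_gp = 1;	/* kvm_inject_gp(vcpu, 0) in the kernel */
	else
		v->rip += 3;		/* skip_emulated_instruction(vcpu); length is arbitrary here */
}

int main(void)
{
	struct vcpu v = { .cr8 = 0, .rip = 0x1000, .pending_gp = 0 };

	finish_insn(&v, set_cr8(&v, 0x5));	/* valid value: RIP advances */
	finish_insn(&v, set_cr8(&v, 0x1f));	/* reserved bit set: #GP, RIP unchanged */

	printf("cr8=%#lx rip=%#lx pending_gp=%d\n", v.cr8, v.rip, v.pending_gp);
	return 0;
}

Before this patch, the VMX path called skip_emulated_instruction() unconditionally after kvm_set_cr8(), so a CR8 write with reserved bits set would both inject #GP and advance RIP; returning the error and letting complete_insn_gp() decide removes that window.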
@@ -661,7 +661,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
-void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
+int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
 int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
...
@@ -2676,16 +2676,17 @@ static int cr0_write_interception(struct vcpu_svm *svm)
 static int cr8_write_interception(struct vcpu_svm *svm)
 {
 	struct kvm_run *kvm_run = svm->vcpu.run;
+	int r;
 	u8 cr8_prev = kvm_get_cr8(&svm->vcpu);

 	/* instruction emulation calls kvm_set_cr8() */
-	emulate_instruction(&svm->vcpu, 0, 0, 0);
+	r = emulate_instruction(&svm->vcpu, 0, 0, 0);
 	if (irqchip_in_kernel(svm->vcpu.kvm)) {
 		clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
-		return 1;
+		return r == EMULATE_DONE;
 	}
 	if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
-		return 1;
+		return r == EMULATE_DONE;
 	kvm_run->exit_reason = KVM_EXIT_SET_TPR;
 	return 0;
 }
...
@@ -3185,8 +3185,8 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 		case 8: {
 				u8 cr8_prev = kvm_get_cr8(vcpu);
 				u8 cr8 = kvm_register_read(vcpu, reg);
-				kvm_set_cr8(vcpu, cr8);
-				skip_emulated_instruction(vcpu);
+				err = kvm_set_cr8(vcpu, cr8);
+				complete_insn_gp(vcpu, err);
 				if (irqchip_in_kernel(vcpu->kvm))
 					return 1;
 				if (cr8_prev <= cr8)
...
@@ -662,7 +662,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 }
 EXPORT_SYMBOL_GPL(kvm_set_cr3);

-int __kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
+int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
 	if (cr8 & CR8_RESERVED_BITS)
 		return 1;
@@ -672,12 +672,6 @@ int __kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 	vcpu->arch.cr8 = cr8;
 	return 0;
 }
-
-void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
-{
-	if (__kvm_set_cr8(vcpu, cr8))
-		kvm_inject_gp(vcpu, 0);
-}
 EXPORT_SYMBOL_GPL(kvm_set_cr8);

 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
@@ -4104,7 +4098,7 @@ static int emulator_set_cr(int cr, unsigned long val, struct kvm_vcpu *vcpu)
 		res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
 		break;
 	case 8:
-		res = __kvm_set_cr8(vcpu, val & 0xfUL);
+		res = kvm_set_cr8(vcpu, val);
 		break;
 	default:
 		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
@@ -5381,8 +5375,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	}

 	/* re-sync apic's tpr */
-	if (!irqchip_in_kernel(vcpu->kvm))
-		kvm_set_cr8(vcpu, kvm_run->cr8);
+	if (!irqchip_in_kernel(vcpu->kvm)) {
+		if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
+			r = -EINVAL;
+			goto out;
+		}
+	}

 	if (vcpu->arch.pio.count || vcpu->mmio_needed) {
 		if (vcpu->mmio_needed) {
...