Commit f6528b03 authored by Avi Kivity

KVM: Remove set_cr0_no_modeswitch() arch op

set_cr0_no_modeswitch() was a hack to avoid corrupting segment registers.
As we now cache the protected mode values on entry to real mode, this
isn't an issue anymore, and it interferes with reboot (which usually _is_
a modeswitch).
Signed-off-by: Avi Kivity <avi@qumranet.com>
Parent 8cb5b033
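For context, the caching the message refers to works roughly like this: VMX cannot execute real-mode segment state directly, so when the guest clears CR0.PE, KVM loads vm86-compatible values into the segment registers and stashes the protected-mode originals; switching back to protected mode restores the stash instead of clobbering it. Below is a minimal user-space sketch of that idea — all struct and function names here are illustrative stand-ins, not the kernel's actual types:

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative stand-ins for per-vcpu segment state; names and
	 * layout are hypothetical, not the kernel's. */
	struct seg {
		uint16_t selector;
		uint32_t base;
		uint32_t limit;
		uint32_t ar;		/* access rights */
	};

	struct rmode_cache {
		int active;
		struct seg saved_cs;	/* protected-mode values cached here */
	};

	/* On entry to real mode: cache the protected-mode segment, then
	 * load vm86-compatible values.  Because the original survives in
	 * the cache, a later mode switch no longer corrupts it -- which is
	 * why set_cr0_no_modeswitch() is no longer needed. */
	static void enter_rmode_sketch(struct rmode_cache *rm, struct seg *cs)
	{
		rm->active = 1;
		rm->saved_cs = *cs;
		cs->selector = (uint16_t)(cs->base >> 4);
		cs->limit = 0xffff;
		cs->ar = 0xf3;		/* vm86-style access rights */
	}

	/* On return to protected mode: restore what was cached on entry. */
	static void enter_pmode_sketch(struct rmode_cache *rm, struct seg *cs)
	{
		rm->active = 0;
		*cs = rm->saved_cs;
	}

	int main(void)
	{
		struct rmode_cache rm = { 0 };
		struct seg cs = { .selector = 0x10, .base = 0,
				  .limit = 0xffffffff, .ar = 0x9b };

		enter_rmode_sketch(&rm, &cs);	/* guest clears CR0.PE */
		enter_pmode_sketch(&rm, &cs);	/* guest sets CR0.PE again */
		printf("cs restored: selector=%#x ar=%#x\n", cs.selector, cs.ar);
		return 0;
	}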
@@ -383,8 +383,6 @@ struct kvm_arch_ops {
 	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
 	void (*decache_cr0_cr4_guest_bits)(struct kvm_vcpu *vcpu);
 	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
-	void (*set_cr0_no_modeswitch)(struct kvm_vcpu *vcpu,
-				      unsigned long cr0);
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
 	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
 	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
......
@@ -1936,7 +1936,7 @@ static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
 	mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
-	kvm_arch_ops->set_cr0_no_modeswitch(vcpu, sregs->cr0);
+	kvm_arch_ops->set_cr0(vcpu, sregs->cr0);
 	mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
 	kvm_arch_ops->set_cr4(vcpu, sregs->cr4);
......
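With kvm_vcpu_ioctl_set_sregs() now routed through the ordinary set_cr0 op, a userspace reset that clears CR0.PE — as a reboot does — gets a genuine mode switch. A hedged sketch of such a caller, assuming vcpu_fd is an already-created KVM vCPU file descriptor:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/* Drop the vCPU back to real mode, e.g. on guest reboot.  After
	 * this patch, clearing CR0.PE below reaches the full set_cr0()
	 * path and performs the mode switch that the old no-modeswitch
	 * op suppressed. */
	static int reset_to_real_mode(int vcpu_fd)
	{
		struct kvm_sregs sregs;

		if (ioctl(vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
			return -1;
		sregs.cr0 &= ~1UL;	/* bit 0 is CR0.PE */
		return ioctl(vcpu_fd, KVM_SET_SREGS, &sregs);
	}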
@@ -1716,7 +1716,6 @@ static struct kvm_arch_ops svm_arch_ops = {
 	.get_cs_db_l_bits = svm_get_cs_db_l_bits,
 	.decache_cr0_cr4_guest_bits = svm_decache_cr0_cr4_guest_bits,
 	.set_cr0 = svm_set_cr0,
-	.set_cr0_no_modeswitch = svm_set_cr0,
 	.set_cr3 = svm_set_cr3,
 	.set_cr4 = svm_set_cr4,
 	.set_efer = svm_set_efer,
......
@@ -788,22 +788,6 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	vcpu->cr0 = cr0;
 }
 
-/*
- * Used when restoring the VM to avoid corrupting segment registers
- */
-static void vmx_set_cr0_no_modeswitch(struct kvm_vcpu *vcpu, unsigned long cr0)
-{
-	if (!vcpu->rmode.active && !(cr0 & CR0_PE_MASK))
-		enter_rmode(vcpu);
-
-	vcpu->rmode.active = ((cr0 & CR0_PE_MASK) == 0);
-	update_exception_bitmap(vcpu);
-	vmcs_writel(CR0_READ_SHADOW, cr0);
-	vmcs_writel(GUEST_CR0,
-		    (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
-	vcpu->cr0 = cr0;
-}
-
 static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
 	vmcs_writel(GUEST_CR3, cr3);
@@ -2069,7 +2053,6 @@ static struct kvm_arch_ops vmx_arch_ops = {
 	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
 	.decache_cr0_cr4_guest_bits = vmx_decache_cr0_cr4_guest_bits,
 	.set_cr0 = vmx_set_cr0,
-	.set_cr0_no_modeswitch = vmx_set_cr0_no_modeswitch,
 	.set_cr3 = vmx_set_cr3,
 	.set_cr4 = vmx_set_cr4,
 #ifdef CONFIG_X86_64
......
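The surviving vmx_set_cr0() differs from the removed twin essentially in one respect: it also switches back out of real mode when CR0.PE is set. A sketch reconstructed from the removed body above — the helpers are those named in the diff, enter_pmode() is the assumed counterpart of enter_rmode(), and the exact kernel code of this era may differ:

	/* Sketch: how the remaining set_cr0 op covers both directions. */
	static void vmx_set_cr0_sketch(struct kvm_vcpu *vcpu, unsigned long cr0)
	{
		if (vcpu->rmode.active && (cr0 & CR0_PE_MASK))
			enter_pmode(vcpu);	/* the modeswitch the removed op skipped */

		if (!vcpu->rmode.active && !(cr0 & CR0_PE_MASK))
			enter_rmode(vcpu);

		vmcs_writel(CR0_READ_SHADOW, cr0);
		vmcs_writel(GUEST_CR0,
			    (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
		vcpu->cr0 = cr0;
	}

Because the protected-mode segment values are now cached on entry to real mode, taking this full path during KVM_SET_SREGS is safe, and a reboot — which usually is a modeswitch — works as expected.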