Commit 4d4ec087, authored by Avi Kivity, committed by Marcelo Tosatti

KVM: Replace read accesses of vcpu->arch.cr0 by an accessor

Since we'd like to allow the guest to own a few bits of cr0 at times, we need
to know when we access those bits.
Signed-off-by: Avi Kivity <avi@redhat.com>

Parent: a1f83a74
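For orientation, the accessor introduced here is deliberately shaped like the existing CR4 accessor touched by this diff. The sketch below shows the direction the commit message hints at: once some CR0 bits are guest-owned, a read must first refresh the cached value from hardware before masking. The field cr0_guest_owned_bits and the decache callback are assumptions modeled on the cr4_guest_owned_bits path visible in kvm_cache_regs.h; the accessor actually added by this commit still returns the cached vcpu->arch.cr0 unconditionally.

/*
 * Sketch only, not this commit's code: a possible future kvm_read_cr0_bits()
 * once a few CR0 bits are guest-owned, mirroring kvm_read_cr4_bits().
 * cr0_guest_owned_bits and decache_cr0_guest_bits() are assumed names.
 */
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	if (mask & vcpu->arch.cr0_guest_owned_bits)	/* bits the guest may change without exiting */
		kvm_x86_ops->decache_cr0_guest_bits(vcpu);	/* refresh the cached cr0 from hardware */
	return vcpu->arch.cr0 & mask;
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);	/* read the whole register */
}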
@@ -1515,7 +1515,7 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt)
 	/* syscall is not available in real mode */
 	if (c->lock_prefix || ctxt->mode == X86EMUL_MODE_REAL
-	    || !(ctxt->vcpu->arch.cr0 & X86_CR0_PE))
+	    || !kvm_read_cr0_bits(ctxt->vcpu, X86_CR0_PE))
 		return -1;
 	setup_syscalls_segments(ctxt, &cs, &ss);
@@ -1569,7 +1569,7 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt)
 	/* inject #GP if in real mode or paging is disabled */
 	if (ctxt->mode == X86EMUL_MODE_REAL ||
-	    !(ctxt->vcpu->arch.cr0 & X86_CR0_PE)) {
+	    !kvm_read_cr0_bits(ctxt->vcpu, X86_CR0_PE)) {
 		kvm_inject_gp(ctxt->vcpu, 0);
 		return -1;
 	}
@@ -1635,7 +1635,7 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt)
 	/* inject #GP if in real mode or paging is disabled */
 	if (ctxt->mode == X86EMUL_MODE_REAL
-	    || !(ctxt->vcpu->arch.cr0 & X86_CR0_PE)) {
+	    || !kvm_read_cr0_bits(ctxt->vcpu, X86_CR0_PE)) {
 		kvm_inject_gp(ctxt->vcpu, 0);
 		return -1;
 	}
@@ -38,6 +38,16 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
 	return vcpu->arch.pdptrs[index];
 }
 
+static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
+{
+	return vcpu->arch.cr0 & mask;
+}
+
+static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
+{
+	return kvm_read_cr0_bits(vcpu, ~0UL);
+}
+
 static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
 {
 	if (mask & vcpu->arch.cr4_guest_owned_bits)
@@ -226,7 +226,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 static int is_write_protection(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.cr0 & X86_CR0_WP;
+	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
 }
 
 static int is_cpuid_PSE36(void)
@@ -79,7 +79,7 @@ static inline int is_pse(struct kvm_vcpu *vcpu)
 static inline int is_paging(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.cr0 & X86_CR0_PG;
+	return kvm_read_cr0_bits(vcpu, X86_CR0_PG);
 }
 
 static inline int is_present_gpte(unsigned long pte)
@@ -980,7 +980,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	if (npt_enabled)
 		goto set;
 
-	if ((vcpu->arch.cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
+	if (kvm_read_cr0_bits(vcpu, X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
 		svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
 		vcpu->fpu_active = 1;
 	}
@@ -1244,7 +1244,7 @@ static int ud_interception(struct vcpu_svm *svm)
 static int nm_interception(struct vcpu_svm *svm)
 {
 	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
-	if (!(svm->vcpu.arch.cr0 & X86_CR0_TS))
+	if (!kvm_read_cr0_bits(&svm->vcpu, X86_CR0_TS))
 		svm->vmcb->save.cr0 &= ~X86_CR0_TS;
 	svm->vcpu.fpu_active = 1;
@@ -1743,7 +1743,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
 	hsave->save.gdtr = vmcb->save.gdtr;
 	hsave->save.idtr = vmcb->save.idtr;
 	hsave->save.efer = svm->vcpu.arch.shadow_efer;
-	hsave->save.cr0 = svm->vcpu.arch.cr0;
+	hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
 	hsave->save.cr4 = svm->vcpu.arch.cr4;
 	hsave->save.rflags = vmcb->save.rflags;
 	hsave->save.rip = svm->next_rip;
@@ -2387,7 +2387,8 @@ static int handle_exit(struct kvm_vcpu *vcpu)
 	if (npt_enabled) {
 		int mmu_reload = 0;
-		if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) {
+		if ((kvm_read_cr0_bits(vcpu, X86_CR0_PG) ^ svm->vmcb->save.cr0)
+		    & X86_CR0_PG) {
 			svm_set_cr0(vcpu, svm->vmcb->save.cr0);
 			mmu_reload = 1;
 		}
@@ -799,7 +799,7 @@ static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
 		return;
 	vcpu->fpu_active = 1;
 	vmcs_clear_bits(GUEST_CR0, X86_CR0_TS);
-	if (vcpu->arch.cr0 & X86_CR0_TS)
+	if (kvm_read_cr0_bits(vcpu, X86_CR0_TS))
 		vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
 	update_exception_bitmap(vcpu);
 }
@@ -1785,7 +1785,7 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 	vmx_flush_tlb(vcpu);
 	vmcs_writel(GUEST_CR3, guest_cr3);
-	if (vcpu->arch.cr0 & X86_CR0_PE)
+	if (kvm_read_cr0_bits(vcpu, X86_CR0_PE))
 		vmx_fpu_deactivate(vcpu);
 }
@@ -1840,7 +1840,7 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu,
 static int vmx_get_cpl(struct kvm_vcpu *vcpu)
 {
-	if (!(vcpu->arch.cr0 & X86_CR0_PE)) /* if real mode */
+	if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) /* if real mode */
 		return 0;
 	if (vmx_get_rflags(vcpu) & X86_EFLAGS_VM) /* if virtual 8086 */
@@ -2095,7 +2095,7 @@ static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
 static bool guest_state_valid(struct kvm_vcpu *vcpu)
 {
 	/* real mode guest state checks */
-	if (!(vcpu->arch.cr0 & X86_CR0_PE)) {
+	if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) {
 		if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
 			return false;
 		if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
@@ -2580,7 +2580,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
 
 	vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
-	vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */
+	vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */
 	vmx_set_cr4(&vmx->vcpu, 0);
 	vmx_set_efer(&vmx->vcpu, 0);
 	vmx_fpu_activate(&vmx->vcpu);
@@ -2996,8 +2996,8 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 	case 2: /* clts */
 		vmx_fpu_deactivate(vcpu);
 		vcpu->arch.cr0 &= ~X86_CR0_TS;
-		vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
-		trace_kvm_cr_write(0, vcpu->arch.cr0);
+		vmcs_writel(CR0_READ_SHADOW, kvm_read_cr0(vcpu));
+		trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
 		vmx_fpu_activate(vcpu);
 		skip_emulated_instruction(vcpu);
 		return 1;
@@ -3018,7 +3018,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 		break;
 	case 3: /* lmsw */
 		val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
-		trace_kvm_cr_write(0, (vcpu->arch.cr0 & ~0xful) | val);
+		trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
 		kvm_lmsw(vcpu, val);
 		skip_emulated_instruction(vcpu);
@@ -430,7 +430,7 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
 	if (cr0 & CR0_RESERVED_BITS) {
 		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
-		       cr0, vcpu->arch.cr0);
+		       cr0, kvm_read_cr0(vcpu));
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
@@ -488,7 +488,7 @@ EXPORT_SYMBOL_GPL(kvm_set_cr0);
 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 {
-	kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
+	kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0ful) | (msw & 0x0f));
 }
 EXPORT_SYMBOL_GPL(kvm_lmsw);
@@ -3095,7 +3095,7 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
 int emulate_clts(struct kvm_vcpu *vcpu)
 {
-	kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
+	kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
 	return X86EMUL_CONTINUE;
 }
@@ -3714,7 +3714,7 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
 	switch (cr) {
 	case 0:
-		value = vcpu->arch.cr0;
+		value = kvm_read_cr0(vcpu);
 		break;
 	case 2:
 		value = vcpu->arch.cr2;
@@ -3741,7 +3741,7 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
 {
 	switch (cr) {
 	case 0:
-		kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
+		kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
 		*rflags = kvm_get_rflags(vcpu);
 		break;
 	case 2:
@@ -4335,7 +4335,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 	sregs->gdt.limit = dt.limit;
 	sregs->gdt.base = dt.base;
 
-	sregs->cr0 = vcpu->arch.cr0;
+	sregs->cr0 = kvm_read_cr0(vcpu);
 	sregs->cr2 = vcpu->arch.cr2;
 	sregs->cr3 = vcpu->arch.cr3;
 	sregs->cr4 = kvm_read_cr4(vcpu);
@@ -4521,7 +4521,7 @@ int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 {
 	struct kvm_segment kvm_seg;
 
-	if (is_vm86_segment(vcpu, seg) || !(vcpu->arch.cr0 & X86_CR0_PE))
+	if (is_vm86_segment(vcpu, seg) || !(kvm_read_cr0_bits(vcpu, X86_CR0_PE)))
 		return kvm_load_realmode_segment(vcpu, selector, seg);
 	if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
 		return 1;
@@ -4799,7 +4799,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 					 &nseg_desc);
 	}
 
-	kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
+	kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0(vcpu) | X86_CR0_TS);
 	seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
 	tr_seg.type = 11;
 	kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
@@ -4834,7 +4834,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	kvm_x86_ops->set_efer(vcpu, sregs->efer);
 	kvm_set_apic_base(vcpu, sregs->apic_base);
 
-	mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
+	mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
 	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
 	vcpu->arch.cr0 = sregs->cr0;
@@ -4873,7 +4873,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	/* Older userspace won't unhalt the vcpu on reset. */
 	if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
 	    sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
-	    !(vcpu->arch.cr0 & X86_CR0_PE))
+	    !(kvm_read_cr0_bits(vcpu, X86_CR0_PE)))
 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 
 	vcpu_put(vcpu);