Commit c1a5d4f9 authored by Avi Kivity

KVM: Replace #GP injection by the generalized exception queue

Signed-off-by: Avi Kivity <avi@qumranet.com>
Parent c3c91fee
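
Before this change, each backend wired a #GP straight into its hardware injection fields: SVM filled vmcb->control.event_inj, VMX wrote VM_ENTRY_INTR_INFO_FIELD. With the generalized exception queue, all call sites funnel through one helper that records the pending fault on the vcpu and lets the next guest entry deliver it. A minimal sketch of the new pattern, assuming kvm_queue_exception_e() from the earlier exception-queue patch in this series:

        /* x86.h: queue a #GP with an error code rather than injecting it directly */
        static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
        {
                kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
        }

        /* Call sites then change mechanically, e.g. on a failed WRMSR: */
        if (svm_set_msr(&svm->vcpu, ecx, data))
                kvm_inject_gp(&svm->vcpu, 0);   /* was svm_inject_gp(&svm->vcpu, 0) */

This also lets the vendor-neutral x86.c and the emulator drop the inject_gp hook from the ops table entirely, as the diff below shows.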
@@ -207,17 +207,6 @@ static bool svm_exception_injected(struct kvm_vcpu *vcpu)
 	return !(svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID);
 }
 
-static void svm_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-
-	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
-					SVM_EVTINJ_VALID_ERR |
-					SVM_EVTINJ_TYPE_EXEPT |
-					GP_VECTOR;
-	svm->vmcb->control.event_inj_err = error_code;
-}
-
 static void inject_ud(struct kvm_vcpu *vcpu)
 {
 	to_svm(vcpu)->vmcb->control.event_inj = SVM_EVTINJ_VALID |
@@ -1115,7 +1104,7 @@ static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	u64 data;
 
 	if (svm_get_msr(&svm->vcpu, ecx, &data))
-		svm_inject_gp(&svm->vcpu, 0);
+		kvm_inject_gp(&svm->vcpu, 0);
 	else {
 		svm->vmcb->save.rax = data & 0xffffffff;
 		svm->vcpu.regs[VCPU_REGS_RDX] = data >> 32;
@@ -1176,7 +1165,7 @@ static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 		| ((u64)(svm->vcpu.regs[VCPU_REGS_RDX] & -1u) << 32);
 	svm->next_rip = svm->vmcb->save.rip + 2;
 	if (svm_set_msr(&svm->vcpu, ecx, data))
-		svm_inject_gp(&svm->vcpu, 0);
+		kvm_inject_gp(&svm->vcpu, 0);
 	else
 		skip_emulated_instruction(&svm->vcpu);
 	return 1;
@@ -1688,8 +1677,6 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.tlb_flush = svm_flush_tlb,
 
-	.inject_gp = svm_inject_gp,
-
 	.run = svm_vcpu_run,
 	.handle_exit = handle_exit,
 	.skip_emulated_instruction = skip_emulated_instruction,
......
@@ -613,18 +613,6 @@ static bool vmx_exception_injected(struct kvm_vcpu *vcpu)
 	return !(vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
 }
 
-static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
-{
-	printk(KERN_DEBUG "inject_general_protection: rip 0x%lx\n",
-	       vmcs_readl(GUEST_RIP));
-	vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
-	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
-		     GP_VECTOR |
-		     INTR_TYPE_EXCEPTION |
-		     INTR_INFO_DELIEVER_CODE_MASK |
-		     INTR_INFO_VALID_MASK);
-}
-
 static void vmx_inject_ud(struct kvm_vcpu *vcpu)
 {
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
@@ -2083,7 +2071,7 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	u64 data;
 
 	if (vmx_get_msr(vcpu, ecx, &data)) {
-		vmx_inject_gp(vcpu, 0);
+		kvm_inject_gp(vcpu, 0);
 		return 1;
 	}
@@ -2101,7 +2089,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		| ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
 
 	if (vmx_set_msr(vcpu, ecx, data) != 0) {
-		vmx_inject_gp(vcpu, 0);
+		kvm_inject_gp(vcpu, 0);
 		return 1;
 	}
@@ -2619,8 +2607,6 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.tlb_flush = vmx_flush_tlb,
 
-	.inject_gp = vmx_inject_gp,
-
 	.run = vmx_vcpu_run,
 	.handle_exit = kvm_handle_exit,
 	.skip_emulated_instruction = skip_emulated_instruction,
......
@@ -128,11 +128,6 @@ void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
 }
 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
 
-static void inject_gp(struct kvm_vcpu *vcpu)
-{
-	kvm_x86_ops->inject_gp(vcpu, 0);
-}
-
 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
 {
 	WARN_ON(vcpu->exception.pending);
@@ -232,20 +227,20 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	if (cr0 & CR0_RESERVED_BITS) {
 		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
 		       cr0, vcpu->cr0);
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
 	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
 		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
 	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
 		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
 		       "and a clear PE flag\n");
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
@@ -257,14 +252,14 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 			if (!is_pae(vcpu)) {
 				printk(KERN_DEBUG "set_cr0: #GP, start paging "
 				       "in long mode while PAE is disabled\n");
-				inject_gp(vcpu);
+				kvm_inject_gp(vcpu, 0);
 				return;
 			}
 			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
 			if (cs_l) {
 				printk(KERN_DEBUG "set_cr0: #GP, start paging "
 				       "in long mode while CS.L == 1\n");
-				inject_gp(vcpu);
+				kvm_inject_gp(vcpu, 0);
 				return;
 			}
@@ -273,7 +268,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
 			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
 			       "reserved bits\n");
-			inject_gp(vcpu);
+			kvm_inject_gp(vcpu, 0);
 			return;
 		}
@@ -299,7 +294,7 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	if (cr4 & CR4_RESERVED_BITS) {
 		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
@@ -307,19 +302,19 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 		if (!(cr4 & X86_CR4_PAE)) {
 			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
 			       "in long mode\n");
-			inject_gp(vcpu);
+			kvm_inject_gp(vcpu, 0);
 			return;
 		}
 	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
 		   && !load_pdptrs(vcpu, vcpu->cr3)) {
 		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
 	if (cr4 & X86_CR4_VMXE) {
 		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 	kvm_x86_ops->set_cr4(vcpu, cr4);
@@ -340,7 +335,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 	if (is_long_mode(vcpu)) {
 		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
 			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
-			inject_gp(vcpu);
+			kvm_inject_gp(vcpu, 0);
 			return;
 		}
 	} else {
@@ -348,13 +343,13 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 			if (cr3 & CR3_PAE_RESERVED_BITS) {
 				printk(KERN_DEBUG
 				       "set_cr3: #GP, reserved bits\n");
-				inject_gp(vcpu);
+				kvm_inject_gp(vcpu, 0);
 				return;
 			}
 			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
 				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
 				       "reserved bits\n");
-				inject_gp(vcpu);
+				kvm_inject_gp(vcpu, 0);
 				return;
 			}
 		}
@@ -375,7 +370,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 	 * to debug) behavior on the guest side.
 	 */
 	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 	else {
 		vcpu->cr3 = cr3;
 		vcpu->mmu.new_cr3(vcpu);
@@ -388,7 +383,7 @@ void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
 	if (cr8 & CR8_RESERVED_BITS) {
 		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 	if (irqchip_in_kernel(vcpu->kvm))
@@ -436,14 +431,14 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 	if (efer & EFER_RESERVED_BITS) {
 		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
 		       efer);
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
 	if (is_paging(vcpu)
 	    && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) {
 		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
@@ -2047,7 +2042,7 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 		 * String I/O in reverse.  Yuck.  Kill the guest, fix later.
 		 */
 		pr_unimpl(vcpu, "guest string pio down\n");
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return 1;
 	}
 	vcpu->run->io.count = now;
@@ -2062,7 +2057,7 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 		vcpu->pio.guest_pages[i] = page;
 		mutex_unlock(&vcpu->kvm->lock);
 		if (!page) {
-			inject_gp(vcpu);
+			kvm_inject_gp(vcpu, 0);
 			free_pio_guest_pages(vcpu);
 			return 1;
 		}
......
@@ -220,8 +220,6 @@ struct kvm_x86_ops {
 	void (*tlb_flush)(struct kvm_vcpu *vcpu);
 
-	void (*inject_gp)(struct kvm_vcpu *vcpu, unsigned err_code);
-
 	void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
 	int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
 	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
@@ -467,6 +465,11 @@ static inline u32 get_rdx_init_val(void)
 	return 0x600; /* P6 family */
 }
 
+static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
+{
+	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
+}
+
 #define ASM_VMX_VMCLEAR_RAX       ".byte 0x66, 0x0f, 0xc7, 0x30"
 #define ASM_VMX_VMLAUNCH          ".byte 0x0f, 0x01, 0xc2"
 #define ASM_VMX_VMRESUME          ".byte 0x0f, 0x01, 0xc3"
......
@@ -1779,7 +1779,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 			| ((u64)c->regs[VCPU_REGS_RDX] << 32);
 		rc = kvm_set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data);
 		if (rc) {
-			kvm_x86_ops->inject_gp(ctxt->vcpu, 0);
+			kvm_inject_gp(ctxt->vcpu, 0);
 			c->eip = ctxt->vcpu->rip;
 		}
 		rc = X86EMUL_CONTINUE;
@@ -1789,7 +1789,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 		/* rdmsr */
 		rc = kvm_get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data);
 		if (rc) {
-			kvm_x86_ops->inject_gp(ctxt->vcpu, 0);
+			kvm_inject_gp(ctxt->vcpu, 0);
 			c->eip = ctxt->vcpu->rip;
 		} else {
 			c->regs[VCPU_REGS_RAX] = (u32)msr_data;
......