Commit ce7ddec4 authored by Joerg Roedel, committed by Avi Kivity

KVM: x86: Allow marking an exception as reinjected

This patch adds logic to kvm/x86 that allows marking an
injected exception as reinjected. This makes it possible to
remove an ugly hack from svm_complete_interrupts that
prevented exceptions from being reinjected at all in the
nested case. The hack was necessary because an exception
reinjected into the nested guest could cause a nested vmexit
emulation, but reinjected exceptions must not be intercepted.
The downside of the hack was that an injected exception could
get lost.
This patch fixes the problem and puts the code into generic
x86 files, because Nested-VMX will likely have the same
problem and could reuse the code.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Parent c2c63a49
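To make the rule the patch enforces concrete, here is a minimal, self-contained C sketch. Every name in it (struct vcpu, nested_intercepts_exception, queue_exception) is an invented stand-in, not kernel code; the real implementation is in the diff below.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical toy model; these names do not come from the kernel. */
struct vcpu {
	bool nested;		/* vCPU is currently running a nested (L2) guest */
	bool l1_intercepts;	/* the L1 hypervisor intercepts this vector      */
};

/* Would the L1 hypervisor intercept this exception? */
static bool nested_intercepts_exception(struct vcpu *v, unsigned nr)
{
	(void)nr;
	return v->nested && v->l1_intercepts;
}

/*
 * Core rule of the patch: only a *newly* injected exception may trigger
 * a nested #VMEXIT. A reinjected exception already passed that check
 * before the vmexit that interrupted its delivery, so it is delivered
 * unconditionally and cannot get lost.
 */
static void queue_exception(struct vcpu *v, unsigned nr, bool reinject)
{
	if (!reinject && nested_intercepts_exception(v, nr)) {
		printf("exception %u: emulate nested #VMEXIT for L1\n", nr);
		return;
	}
	printf("exception %u: deliver to guest (reinject=%d)\n", nr, reinject);
}

int main(void)
{
	struct vcpu v = { .nested = true, .l1_intercepts = true };

	queue_exception(&v, 14, false);	/* intercepted: handed to L1      */
	queue_exception(&v, 14, true);	/* reinjected: delivered directly */
	return 0;
}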
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -312,6 +312,7 @@ struct kvm_vcpu_arch {
 	struct kvm_queued_exception {
 		bool pending;
 		bool has_error_code;
+		bool reinject;
 		u8 nr;
 		u32 error_code;
 	} exception;
@@ -514,7 +515,8 @@ struct kvm_x86_ops {
 	void (*set_irq)(struct kvm_vcpu *vcpu);
 	void (*set_nmi)(struct kvm_vcpu *vcpu);
 	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
-				bool has_error_code, u32 error_code);
+				bool has_error_code, u32 error_code,
+				bool reinject);
 	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
 	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
 	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
@@ -617,6 +619,8 @@ void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
+void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
+void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
 			   u32 error_code);
 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -338,7 +338,8 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 }
 
 static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
-				bool has_error_code, u32 error_code)
+				bool has_error_code, u32 error_code,
+				bool reinject)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -346,7 +347,8 @@ static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
 	 * If we are within a nested VM we'd better #VMEXIT and let the guest
 	 * handle the exception
 	 */
-	if (nested_svm_check_exception(svm, nr, has_error_code, error_code))
+	if (!reinject &&
+	    nested_svm_check_exception(svm, nr, has_error_code, error_code))
 		return;
 
 	if (nr == BP_VECTOR && !svm_has(SVM_FEATURE_NRIP)) {
@@ -2918,8 +2920,6 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
 		svm->vcpu.arch.nmi_injected = true;
 		break;
 	case SVM_EXITINTINFO_TYPE_EXEPT:
-		if (is_nested(svm))
-			break;
 		/*
 		 * In case of software exceptions, do not reinject the vector,
 		 * but re-execute the instruction instead. Rewind RIP first
@@ -2935,10 +2935,10 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
 		}
 		if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
 			u32 err = svm->vmcb->control.exit_int_info_err;
-			kvm_queue_exception_e(&svm->vcpu, vector, err);
+			kvm_requeue_exception_e(&svm->vcpu, vector, err);
 		} else
-			kvm_queue_exception(&svm->vcpu, vector);
+			kvm_requeue_exception(&svm->vcpu, vector);
 		break;
 	case SVM_EXITINTINFO_TYPE_INTR:
 		kvm_queue_interrupt(&svm->vcpu, vector, false);
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -919,7 +919,8 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 }
 
 static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
-				bool has_error_code, u32 error_code)
+				bool has_error_code, u32 error_code,
+				bool reinject)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 intr_info = nr | INTR_INFO_VALID_MASK;
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -265,7 +265,8 @@ static int exception_class(int vector)
 }
 
 static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
-		unsigned nr, bool has_error, u32 error_code)
+		unsigned nr, bool has_error, u32 error_code,
+		bool reinject)
 {
 	u32 prev_nr;
 	int class1, class2;
@@ -276,6 +277,7 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
 		vcpu->arch.exception.has_error_code = has_error;
 		vcpu->arch.exception.nr = nr;
 		vcpu->arch.exception.error_code = error_code;
+		vcpu->arch.exception.reinject = reinject;
 		return;
 	}
@@ -304,10 +306,16 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
 
 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
 {
-	kvm_multiple_exception(vcpu, nr, false, 0);
+	kvm_multiple_exception(vcpu, nr, false, 0, false);
 }
 EXPORT_SYMBOL_GPL(kvm_queue_exception);
 
+void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
+{
+	kvm_multiple_exception(vcpu, nr, false, 0, true);
+}
+EXPORT_SYMBOL_GPL(kvm_requeue_exception);
+
 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
 			   u32 error_code)
 {
@@ -324,10 +332,16 @@ EXPORT_SYMBOL_GPL(kvm_inject_nmi);
 
 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
 {
-	kvm_multiple_exception(vcpu, nr, true, error_code);
+	kvm_multiple_exception(vcpu, nr, true, error_code, false);
 }
 EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
 
+void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
+{
+	kvm_multiple_exception(vcpu, nr, true, error_code, true);
+}
+EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
+
 /*
  * Checks if cpl <= required_cpl; if true, return true. Otherwise queue
  * a #GP and return false.
@@ -4408,7 +4422,8 @@ static void inject_pending_event(struct kvm_vcpu *vcpu)
 					vcpu->arch.exception.error_code);
 		kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
 					     vcpu->arch.exception.has_error_code,
-					     vcpu->arch.exception.error_code);
+					     vcpu->arch.exception.error_code,
+					     vcpu->arch.exception.reinject);
 		return;
 	}
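Taken together, the exported pair forms a small contract for callers, sketched below. The handler skeleton and the inline masks are illustrative stand-ins (the real SVM code uses the SVM_EXITINTINFO_* constants shown above); only the kvm_queue_exception_e() and kvm_requeue_exception() entry points come from this patch.

/*
 * Illustrative sketch, not kernel code: how a caller chooses between
 * the two entry points.
 *
 * - kvm_queue_exception()/..._e(): a brand-new fault raised during
 *   emulation; the nested intercept check applies on the next entry.
 * - kvm_requeue_exception()/..._e(): delivery was interrupted by a
 *   vmexit; skip the intercept check and finish the delivery.
 */
static void example_complete_interrupts(struct kvm_vcpu *vcpu, u32 exitintinfo)
{
	unsigned vector = exitintinfo & 0xff;	/* stand-in for SVM_EXITINTINFO_VEC_MASK */

	if (!(exitintinfo & (1u << 31)))	/* stand-in for SVM_EXITINTINFO_VALID */
		return;

	/* Already in flight when the vmexit hit: must not be intercepted again. */
	kvm_requeue_exception(vcpu, vector);
}

static void example_emulated_page_fault(struct kvm_vcpu *vcpu, u32 error_code)
{
	/* Fresh fault: normal queueing; nested intercepts may apply. */
	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}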