Commit 4d5523cf authored by Paolo Bonzini

KVM: x86: fix DR6 delivery for various cases of #DB injection

Go through kvm_queue_exception_p so that the payload is correctly delivered
through the exit qualification, and add a kvm_update_dr6 call to
kvm_deliver_exception_payload that is needed on AMD.
Reported-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent: b9b2782c
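For context on the "needed on AMD" note: kvm_update_dr6 pushes the software-tracked vcpu->arch.dr6 value into the vendor backend whenever the guest, rather than host userspace, owns the debug registers. On SVM the guest's DR6 lives in the VMCB, so a DR6 change made while delivering the #DB payload must be propagated there; on VMX, DR6 is not a VMCS field, so nothing extra is required. A minimal sketch of what the helper plausibly looks like around this change (the set_dr6 hook name and call form are assumptions, not taken from this diff):

/* Sketch only -- the backend hook name/form is an assumption. */
static void kvm_update_dr6(struct kvm_vcpu *vcpu)
{
	/* Only sync DR6 when the guest owns the debug registers. */
	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
		kvm_x86_ops.set_dr6(vcpu, vcpu->arch.dr6);
}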
arch/x86/include/asm/kvm_host.h
@@ -1449,6 +1449,7 @@ bool kvm_rdpmc(struct kvm_vcpu *vcpu);
 
 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
+void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr, unsigned long payload);
 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
......
arch/x86/kvm/vmx/vmx.c
@@ -4677,12 +4677,10 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
 		dr6 = vmcs_readl(EXIT_QUALIFICATION);
 		if (!(vcpu->guest_debug &
 		      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
-			vcpu->arch.dr6 &= ~DR_TRAP_BITS;
-			vcpu->arch.dr6 |= dr6 | DR6_RTM;
 			if (is_icebp(intr_info))
 				WARN_ON(!skip_emulated_instruction(vcpu));
 
-			kvm_queue_exception(vcpu, DB_VECTOR);
+			kvm_queue_exception_p(vcpu, DB_VECTOR, dr6);
 			return 1;
 		}
 		kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
@@ -4936,9 +4934,7 @@ static int handle_dr(struct kvm_vcpu *vcpu)
 			vcpu->run->exit_reason = KVM_EXIT_DEBUG;
 			return 0;
 		} else {
-			vcpu->arch.dr6 &= ~DR_TRAP_BITS;
-			vcpu->arch.dr6 |= DR6_BD | DR6_RTM;
-			kvm_queue_exception(vcpu, DB_VECTOR);
+			kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BD);
 			return 1;
 		}
 	}
......
arch/x86/kvm/x86.c
@@ -104,6 +104,7 @@ static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;
 				  KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
 
 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
+static void kvm_update_dr6(struct kvm_vcpu *vcpu);
 static void process_nmi(struct kvm_vcpu *vcpu);
 static void enter_smm(struct kvm_vcpu *vcpu);
 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
@@ -473,6 +474,7 @@ void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu)
 		 * breakpoint), it is reserved and must be zero in DR6.
 		 */
 		vcpu->arch.dr6 &= ~BIT(12);
+		kvm_update_dr6(vcpu);
 		break;
 	case PF_VECTOR:
 		vcpu->arch.cr2 = payload;
@@ -572,11 +574,12 @@ void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
 }
 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
 
-static void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
-				  unsigned long payload)
+void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
+			   unsigned long payload)
 {
 	kvm_multiple_exception(vcpu, nr, false, 0, true, payload, false);
 }
+EXPORT_SYMBOL_GPL(kvm_queue_exception_p);
 
 static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr,
 				    u32 error_code, unsigned long payload)
@@ -6719,9 +6722,7 @@ static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
 					   vcpu->arch.db);
 
 		if (dr6 != 0) {
-			vcpu->arch.dr6 &= ~DR_TRAP_BITS;
-			vcpu->arch.dr6 |= dr6 | DR6_RTM;
-			kvm_queue_exception(vcpu, DB_VECTOR);
+			kvm_queue_exception_p(vcpu, DB_VECTOR, dr6);
 			*r = 1;
 			return true;
 		}
......
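For reference, the open-coded DR_TRAP_BITS / DR6_RTM updates removed above are now funneled through the payload delivery path. Roughly, the DB_VECTOR case of kvm_deliver_exception_payload ends up doing the following (a sketch based on the KVM code around this commit; only the last two lines are visible in the hunk above):

	case DB_VECTOR:
		/* Certain debug exceptions clear only bits 0-3 of DR6. */
		vcpu->arch.dr6 &= ~DR_TRAP_BITS;
		/* DR6.RTM is set by every #DB that does not clear it. */
		vcpu->arch.dr6 |= DR6_RTM;
		/*
		 * The payload encodes bit 16 inverted relative to DR6
		 * (VMX "pending debug exceptions" style), so fold it in
		 * and flip RTM when the payload requests it.
		 */
		vcpu->arch.dr6 |= payload;
		vcpu->arch.dr6 ^= payload & DR6_RTM;
		/*
		 * Bit 12 is defined in the pending-debug-exceptions field
		 * but reserved in DR6 (shown in the hunk above).
		 */
		vcpu->arch.dr6 &= ~BIT(12);
		/* New in this commit: propagate DR6 to the backend (SVM's VMCB). */
		kvm_update_dr6(vcpu);
		break;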