Unverified commit e23d9025, authored by openeuler-ci-bot and committed by Gitee

!115 SPR: KVM: Add Bus Lock Debug Exception

Merge Pull Request from: @allen-shi 
 
This PR adds KVM support for the Bus Lock Debug Exception.

Intel-Kernel Issue
[#I5RHW7](https://gitee.com/openeuler/intel-kernel/issues/I5RHW7)

Test
Verified that the guest supports the Bus Lock Debug Exception feature.

Known Issue
N/A

Default config change
N/A 
 
Link: https://gitee.com/openeuler/kernel/pulls/115
Reviewed-by: Zheng Zengkai <zhengzengkai@huawei.com> 
Reviewed-by: Kevin Zhu <zhukeqian1@huawei.com> 
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com> 
@@ -202,13 +202,22 @@ enum x86_intercept_stage;
 #define KVM_NR_DB_REGS	4
 
+#define DR6_BUS_LOCK	(1 << 11)
 #define DR6_BD		(1 << 13)
 #define DR6_BS		(1 << 14)
 #define DR6_BT		(1 << 15)
 #define DR6_RTM		(1 << 16)
-#define DR6_FIXED_1	0xfffe0ff0
-#define DR6_INIT	0xffff0ff0
-#define DR6_VOLATILE	0x0001e00f
+/*
+ * DR6_ACTIVE_LOW combines fixed-1 and active-low bits.
+ * We can regard all the bits in DR6_FIXED_1 as active_low bits;
+ * they will never be 0 for now, but when they are defined
+ * in the future it will require no code change.
+ *
+ * DR6_ACTIVE_LOW is also used as the init/reset value for DR6.
+ */
+#define DR6_ACTIVE_LOW	0xffff0ff0
+#define DR6_VOLATILE	0x0001e80f
+#define DR6_FIXED_1	(DR6_ACTIVE_LOW & ~DR6_VOLATILE)
 
 #define DR7_BP_EN_MASK	0x000000ff
 #define DR7_GE		(1 << 9)

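As a sanity check on the new constants (a standalone C sketch, not part of the patch), the relationship between DR6_ACTIVE_LOW, DR6_VOLATILE, and the derived DR6_FIXED_1 can be verified directly:

```c
#include <assert.h>

#define DR6_BUS_LOCK	(1u << 11)
#define DR6_ACTIVE_LOW	0xffff0ff0u
#define DR6_VOLATILE	0x0001e80fu
#define DR6_FIXED_1	(DR6_ACTIVE_LOW & ~DR6_VOLATILE)

int main(void)
{
	/* Making DR6_BUS_LOCK volatile removes bit 11 from the fixed-1
	 * set: the derived value is 0xfffe07f0, versus the old literal
	 * 0xfffe0ff0. */
	assert(DR6_FIXED_1 == 0xfffe07f0u);
	/* No volatile bit may also be fixed-1. */
	assert((DR6_FIXED_1 & DR6_VOLATILE) == 0);
	/* Bit 11 is active low: present in both masks. */
	assert(DR6_ACTIVE_LOW & DR6_BUS_LOCK);
	assert(DR6_VOLATILE & DR6_BUS_LOCK);
	return 0;
}
```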
@@ -1150,22 +1150,23 @@ static void bus_lock_init(void)
 {
 	u64 val;
 
-	/*
-	 * Warn and fatal are handled by #AC for split lock if #AC for
-	 * split lock is supported.
-	 */
-	if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT) ||
-	    (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) &&
-	    (sld_state == sld_warn || sld_state == sld_fatal)) ||
-	    sld_state == sld_off)
+	if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
 		return;
 
-	/*
-	 * Enable #DB for bus lock. All bus locks are handled in #DB except
-	 * split locks are handled in #AC in the fatal case.
-	 */
 	rdmsrl(MSR_IA32_DEBUGCTLMSR, val);
-	val |= DEBUGCTLMSR_BUS_LOCK_DETECT;
+
+	if ((boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) &&
+	    (sld_state == sld_warn || sld_state == sld_fatal)) ||
+	    sld_state == sld_off) {
+		/*
+		 * Warn and fatal are handled by #AC for split lock if #AC for
+		 * split lock is supported.
+		 */
+		val &= ~DEBUGCTLMSR_BUS_LOCK_DETECT;
+	} else {
+		val |= DEBUGCTLMSR_BUS_LOCK_DETECT;
+	}
+
 	wrmsrl(MSR_IA32_DEBUGCTLMSR, val);
 }

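The rework checks X86_FEATURE_BUS_LOCK_DETECT up front, then explicitly clears DEBUGCTLMSR_BUS_LOCK_DETECT when split-lock #AC already handles the warn/fatal cases (or when sld_off), instead of merely skipping the enable as the old code did. A standalone model of the resulting policy (the helper name and boolean parameters are illustrative, not kernel code; the enum values mirror those used in the diff):

```c
/* Standalone model of the #DB bus-lock enable policy implemented in
 * bus_lock_init() above. */
enum sld_state { sld_off, sld_warn, sld_fatal };

int bus_lock_db_enabled(int has_bus_lock_detect, int has_split_lock_detect,
			enum sld_state state)
{
	if (!has_bus_lock_detect)
		return 0;	/* no hardware support: DEBUGCTL untouched */

	/* #AC covers warn/fatal when split lock detect exists, and
	 * sld_off disables everything: the bit is cleared. */
	if ((has_split_lock_detect &&
	     (state == sld_warn || state == sld_fatal)) ||
	    state == sld_off)
		return 0;

	return 1;	/* otherwise #DB fires on bus locks */
}
```

Compared with the old code, the enable path is unchanged; the new path additionally clears a previously set bit rather than leaving the MSR untouched.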
@@ -441,7 +441,7 @@ void kvm_set_cpu_caps(void)
 		F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
 		F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
 		F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/ |
-		F(SGX_LC)
+		F(SGX_LC) | F(BUS_LOCK_DETECT)
 	);
 
 	/* Set LA57 based on hardware capability. */
 	if (cpuid_ecx(7) & F(LA57))

@@ -4261,7 +4261,7 @@ static int check_dr_read(struct x86_emulate_ctxt *ctxt)
 		ctxt->ops->get_dr(ctxt, 6, &dr6);
 		dr6 &= ~DR_TRAP_BITS;
-		dr6 |= DR6_BD | DR6_RTM;
+		dr6 |= DR6_BD | DR6_ACTIVE_LOW;
 		ctxt->ops->set_dr(ctxt, 6, dr6);
 		return emulate_db(ctxt);
 	}

@@ -405,7 +405,7 @@ static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
 	svm->vmcb->save.ds = vmcb12->save.ds;
 	svm->vmcb->save.gdtr = vmcb12->save.gdtr;
 	svm->vmcb->save.idtr = vmcb12->save.idtr;
-	kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags);
+	kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
 
 	/*
 	 * Force-set EFER_SVME even though it is checked earlier on the
@@ -425,8 +425,8 @@ static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
 	svm->vmcb->save.rax = vmcb12->save.rax;
 	svm->vmcb->save.rsp = vmcb12->save.rsp;
 	svm->vmcb->save.rip = vmcb12->save.rip;
-	svm->vmcb->save.dr7 = vmcb12->save.dr7;
-	svm->vcpu.arch.dr6 = vmcb12->save.dr6;
+	svm->vmcb->save.dr7 = vmcb12->save.dr7 | DR7_FIXED_1;
+	svm->vcpu.arch.dr6 = vmcb12->save.dr6 | DR6_ACTIVE_LOW;
 	svm->vmcb->save.cpl = vmcb12->save.cpl;
 }
@@ -699,14 +699,14 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
 	svm->vmcb->save.ds = hsave->save.ds;
 	svm->vmcb->save.gdtr = hsave->save.gdtr;
 	svm->vmcb->save.idtr = hsave->save.idtr;
-	kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
+	kvm_set_rflags(&svm->vcpu, hsave->save.rflags | X86_EFLAGS_FIXED);
 	svm_set_efer(&svm->vcpu, hsave->save.efer);
 	svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
 	svm_set_cr4(&svm->vcpu, hsave->save.cr4);
 	kvm_rax_write(&svm->vcpu, hsave->save.rax);
 	kvm_rsp_write(&svm->vcpu, hsave->save.rsp);
 	kvm_rip_write(&svm->vcpu, hsave->save.rip);
-	svm->vmcb->save.dr7 = 0;
+	svm->vmcb->save.dr7 = DR7_FIXED_1;
 	svm->vmcb->save.cpl = 0;
 	svm->vmcb->control.exit_int_info = 0;

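Both nested-SVM hunks enforce architecturally reserved-1 bits when loading register images from vmcb12 or hsave. A minimal standalone sketch of the sanitization, with the constant values as defined in the kernel headers (EFLAGS bit 1 and DR7 bit 10 must read as 1):

```c
/* Reserved-1 enforcement for guest-supplied register images, as done
 * in the nested-SVM hunks above (standalone sketch; constants match
 * the kernel's definitions). */
#define X86_EFLAGS_FIXED	0x00000002ul	/* EFLAGS bit 1, always 1 */
#define DR7_FIXED_1		0x00000400ul	/* DR7 bit 10, always 1 */
#define DR6_ACTIVE_LOW		0xffff0ff0ul

unsigned long sanitize_rflags(unsigned long rflags)
{
	return rflags | X86_EFLAGS_FIXED;
}

unsigned long sanitize_dr7(unsigned long dr7)
{
	return dr7 | DR7_FIXED_1;
}

unsigned long sanitize_dr6(unsigned long dr6)
{
	/* DR6_ACTIVE_LOW doubles as the reset template: fixed-1 and
	 * active-low bits all read as 1 unless a trap condition (or a
	 * detected bus lock) says otherwise. */
	return dr6 | DR6_ACTIVE_LOW;
}
```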
@@ -1788,7 +1788,7 @@ static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
 	get_debugreg(vcpu->arch.db[2], 2);
 	get_debugreg(vcpu->arch.db[3], 3);
 	/*
-	 * We cannot reset svm->vmcb->save.dr6 to DR6_FIXED_1|DR6_RTM here,
+	 * We cannot reset svm->vmcb->save.dr6 to DR6_ACTIVE_LOW here,
 	 * because db_interception might need it. We can do it before vmentry.
 	 */
 	vcpu->arch.dr6 = svm->vmcb->save.dr6;
@@ -1836,7 +1836,7 @@ static int db_interception(struct vcpu_svm *svm)
 	if (!(svm->vcpu.guest_debug &
 	      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
 		!svm->nmi_singlestep) {
-		u32 payload = (svm->vmcb->save.dr6 ^ DR6_RTM) & ~DR6_FIXED_1;
+		u32 payload = svm->vmcb->save.dr6 ^ DR6_ACTIVE_LOW;
 		kvm_queue_exception_p(&svm->vcpu, DB_VECTOR, payload);
 		return 1;
 	}
@@ -3595,7 +3595,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 	if (unlikely(svm->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
 		svm_set_dr6(svm, vcpu->arch.dr6);
 	else
-		svm_set_dr6(svm, DR6_FIXED_1 | DR6_RTM);
+		svm_set_dr6(svm, DR6_ACTIVE_LOW);
 
 	clgi();
 	kvm_load_guest_xsave_state(vcpu);

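db_interception() now derives the exception payload from the raw hardware DR6 with a single XOR against DR6_ACTIVE_LOW: fixed-1 bits always read as 1 and therefore vanish, while active-low bits are flipped so that every payload bit is active high. A standalone sketch:

```c
/* Standalone sketch of the DR6 -> payload conversion used in
 * db_interception() above. */
#define DR6_ACTIVE_LOW	0xffff0ff0u
#define DR6_BUS_LOCK	(1u << 11)

unsigned int dr6_to_payload(unsigned int hw_dr6)
{
	/* Fixed-1 bits read as 1 and XOR away; active-low bits flip, so
	 * a detected bus lock (bit 11 reads 0) becomes a 1 in the
	 * payload; active-high trap bits pass through unchanged. */
	return hw_dr6 ^ DR6_ACTIVE_LOW;
}

/* For example, a bus-lock #DB reads DR6 == 0xffff07f0, and
 * dr6_to_payload(0xffff07f0) == DR6_BUS_LOCK (0x800). */
```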
@@ -411,6 +411,9 @@ static inline u64 vmx_supported_debugctl(void)
 {
 	u64 debugctl = 0;
 
+	if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
+		debugctl |= DEBUGCTLMSR_BUS_LOCK_DETECT;
+
 	if (vmx_get_perf_capabilities() & PMU_CAP_LBR_FMT)
 		debugctl |= DEBUGCTLMSR_LBR_MASK;

@@ -412,8 +412,8 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
 	if (nr == DB_VECTOR) {
 		if (!has_payload) {
 			payload = vcpu->arch.dr6;
-			payload &= ~(DR6_FIXED_1 | DR6_BT);
-			payload ^= DR6_RTM;
+			payload &= ~DR6_BT;
+			payload ^= DR6_ACTIVE_LOW;
 		}
 		*exit_qual = payload;
 	} else

@@ -2076,6 +2076,9 @@ static u64 vcpu_supported_debugctl(struct kvm_vcpu *vcpu)
 	if (!intel_pmu_lbr_is_enabled(vcpu))
 		debugctl &= ~DEBUGCTLMSR_LBR_MASK;
 
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT))
+		debugctl &= ~DEBUGCTLMSR_BUS_LOCK_DETECT;
+
 	return debugctl;
 }

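vmx_supported_debugctl() in the capabilities.h hunk reports what the host CPU can expose, while vcpu_supported_debugctl() here strips bits the guest's CPUID does not advertise. A condensed standalone sketch of the two-level gating (the boolean parameters stand in for the boot_cpu_has()/guest_cpuid_has() checks; the real functions also consult the PMU capabilities):

```c
/* Two-level DEBUGCTL gating as in the hunks above (standalone sketch). */
#define DEBUGCTLMSR_BUS_LOCK_DETECT	(1ull << 2)	/* IA32_DEBUGCTL bit 2 */

unsigned long long host_supported_debugctl(int host_has_bld)
{
	unsigned long long debugctl = 0;

	if (host_has_bld)
		debugctl |= DEBUGCTLMSR_BUS_LOCK_DETECT;
	return debugctl;
}

unsigned long long vcpu_supported_debugctl(unsigned long long host_bits,
					   int guest_cpuid_has_bld)
{
	/* A guest WRMSR that sets anything outside the result #GPs. */
	if (!guest_cpuid_has_bld)
		host_bits &= ~DEBUGCTLMSR_BUS_LOCK_DETECT;
	return host_bits;
}
```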
@@ -5149,7 +5152,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
 			kvm_queue_exception_p(vcpu, DB_VECTOR, dr6);
 			return 1;
 		}
-		kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
+		kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW;
 		kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
 		fallthrough;
 	case BP_VECTOR:
@@ -5394,7 +5397,7 @@ static int handle_dr(struct kvm_vcpu *vcpu)
 	 * guest debugging itself.
 	 */
 	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
-		vcpu->run->debug.arch.dr6 = DR6_BD | DR6_RTM | DR6_FIXED_1;
+		vcpu->run->debug.arch.dr6 = DR6_BD | DR6_ACTIVE_LOW;
 		vcpu->run->debug.arch.dr7 = dr7;
 		vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu);
 		vcpu->run->debug.arch.exception = DB_VECTOR;

@@ -553,19 +553,24 @@ void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu)
 		 */
 		vcpu->arch.dr6 &= ~DR_TRAP_BITS;
 		/*
-		 * DR6.RTM is set by all #DB exceptions that don't clear it.
+		 * In order to reflect the #DB exception payload in guest
+		 * dr6, three components need to be considered: active low
+		 * bit, FIXED_1 bits and active high bits (e.g. DR6_BD,
+		 * DR6_BS and DR6_BT)
+		 * DR6_ACTIVE_LOW contains the FIXED_1 and active low bits.
+		 * In the target guest dr6:
+		 * FIXED_1 bits should always be set.
+		 * Active low bits should be cleared if 1-setting in payload.
+		 * Active high bits should be set if 1-setting in payload.
+		 *
+		 * Note, the payload is compatible with the pending debug
+		 * exceptions/exit qualification under VMX, that active_low bits
+		 * are active high in payload.
+		 * So they need to be flipped for DR6.
 		 */
-		vcpu->arch.dr6 |= DR6_RTM;
+		vcpu->arch.dr6 |= DR6_ACTIVE_LOW;
 		vcpu->arch.dr6 |= payload;
-		/*
-		 * Bit 16 should be set in the payload whenever the #DB
-		 * exception should clear DR6.RTM. This makes the payload
-		 * compatible with the pending debug exceptions under VMX.
-		 * Though not currently documented in the SDM, this also
-		 * makes the payload compatible with the exit qualification
-		 * for #DB exceptions under VMX.
-		 */
-		vcpu->arch.dr6 ^= payload & DR6_RTM;
+		vcpu->arch.dr6 ^= payload & DR6_ACTIVE_LOW;
 
 		/*
 		 * The #DB payload is defined as compatible with the 'pending
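A standalone worked example of the three-step transform above, delivering one active-high bit (DR6_BS) and one active-low bit (DR6_BUS_LOCK); the starting dr6 is simplified to the template value, whereas the kernel preserves the guest's non-trap bits:

```c
#include <assert.h>

#define DR6_ACTIVE_LOW	0xffff0ff0u
#define DR6_BUS_LOCK	(1u << 11)	/* active low in DR6 */
#define DR6_BS		(1u << 14)	/* active high in DR6 */

unsigned int deliver(unsigned int payload)
{
	unsigned int dr6 = DR6_ACTIVE_LOW;	/* fixed-1/active-low template */

	dr6 |= payload;			/* sets the active-high bits */
	dr6 ^= payload & DR6_ACTIVE_LOW;/* flips the active-low bits */
	return dr6;
}

int main(void)
{
	/* Single-step: BS is active high, so it is simply set. */
	assert(deliver(DR6_BS) == (DR6_ACTIVE_LOW | DR6_BS));
	/* Bus lock: a 1 in the payload clears the active-low bit 11. */
	assert(deliver(DR6_BUS_LOCK) == (DR6_ACTIVE_LOW & ~DR6_BUS_LOCK));
	return 0;
}
```

Note how this is the exact inverse of the XOR in db_interception() above, so a hardware DR6 converted to a payload and delivered back round-trips to the same value.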
@@ -1177,6 +1182,9 @@ static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
 	if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM))
 		fixed |= DR6_RTM;
+
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT))
+		fixed |= DR6_BUS_LOCK;
 	return fixed;
 }
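kvm_dr6_fixed() pairs with the guest DR6 write path (kvm_set_dr(), not part of this diff), where only the volatile bits are taken from the guest-written value and everything else comes from the fixed template, so the active-low bit of a feature the guest lacks stays forced to 1. A condensed sketch, assuming that call site:

```c
/* Condensed sketch of guest DR6 write sanitization, assuming the
 * kvm_set_dr() call site that consumes kvm_dr6_fixed() above. */
#define DR6_VOLATILE	0x0001e80full

unsigned long long sanitize_guest_dr6(unsigned long long val,
				      unsigned long long dr6_fixed)
{
	/* The guest controls only the volatile bits; active-low bits
	 * for features it lacks (RTM, BUS_LOCK_DETECT) remain 1 via
	 * the fixed template. */
	return (val & DR6_VOLATILE) | dr6_fixed;
}
```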
@@ -7332,7 +7340,7 @@ static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu)
 	struct kvm_run *kvm_run = vcpu->run;
 
 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
-		kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM;
+		kvm_run->debug.arch.dr6 = DR6_BS | DR6_ACTIVE_LOW;
 		kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu);
 		kvm_run->debug.arch.exception = DB_VECTOR;
 		kvm_run->exit_reason = KVM_EXIT_DEBUG;
@@ -7376,7 +7384,7 @@ static bool kvm_vcpu_check_code_breakpoint(struct kvm_vcpu *vcpu, int *r)
 					   vcpu->arch.eff_db);
 
 		if (dr6 != 0) {
-			kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
+			kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW;
 			kvm_run->debug.arch.pc = eip;
 			kvm_run->debug.arch.exception = DB_VECTOR;
 			kvm_run->exit_reason = KVM_EXIT_DEBUG;
@@ -10207,7 +10215,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
 	kvm_update_dr0123(vcpu);
-	vcpu->arch.dr6 = DR6_INIT;
+	vcpu->arch.dr6 = DR6_ACTIVE_LOW;
 	vcpu->arch.dr7 = DR7_FIXED_1;
 	kvm_update_dr7(vcpu);