diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 5f28dfa14cee2cb4c51bfd01b79652b3bc146d0b..6d36147951971b1a3fc5360995fce80503162f7a 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -64,6 +64,11 @@ static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
 	vcpu->arch.hcr_el2 = hcr;
 }
 
+static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
+{
+	vcpu->arch.vsesr_el2 = vsesr;
+}
+
 static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
 {
 	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index dcdd08edf5a56f45a9be0af9d613c3b924114154..3014b39b8fe204298a2b2af2cb5c86526ebaa0d1 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -280,6 +280,9 @@ struct kvm_vcpu_arch {
 
 	/* Detect first run of a vcpu */
 	bool has_run_once;
+
+	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
+	u64 vsesr_el2;
 };
 
 #define vcpu_gp_regs(v)		(&(v)->arch.ctxt.gp_regs)
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index b5d543fc677d007f6d6e556650a9aa459c4d83c0..52cfdc216bcf9da381a78eb35d0233c431924ad2 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -355,6 +355,7 @@
 
 #define SYS_DACR32_EL2			sys_reg(3, 4, 3, 0, 0)
 #define SYS_IFSR32_EL2			sys_reg(3, 4, 5, 0, 1)
+#define SYS_VSESR_EL2			sys_reg(3, 4, 5, 2, 3)
 #define SYS_FPEXC32_EL2			sys_reg(3, 4, 5, 3, 0)
 
 #define __SYS__AP0Rx_EL2(x)		sys_reg(3, 4, 12, 8, x)
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 324f4202cdd50a35573d3a9aa12ac28df8a97327..b425b8aab45b3aab9077045d843cf875141afb32 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -94,6 +94,9 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
 
 	write_sysreg(val, hcr_el2);
 
+	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (val & HCR_VSE))
+		write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
+
 	/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
 	write_sysreg(1 << 15, hstr_el2);
 	/*
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 8ecbcb40e31799cc1b965e51bce1538ba37461a8..60666a05694490dfc3fc070019cdf64c1794d4d8 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -164,14 +164,25 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
 		inject_undef64(vcpu);
 }
 
+static void pend_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
+{
+	vcpu_set_vsesr(vcpu, esr);
+	vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) | HCR_VSE);
+}
+
 /**
  * kvm_inject_vabt - inject an async abort / SError into the guest
  * @vcpu: The VCPU to receive the exception
  *
  * It is assumed that this code is called from the VCPU thread and that the
  * VCPU therefore is not currently executing guest code.
+ *
+ * Systems with the RAS Extensions specify an imp-def ESR (ISV/IDS = 1) with
+ * the remaining ISS all-zeros so that this error is not interpreted as an
+ * uncategorized RAS error. Without the RAS Extensions we can't specify an ESR
+ * value, so the CPU generates an imp-def value.
  */
 void kvm_inject_vabt(struct kvm_vcpu *vcpu)
 {
-	vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) | HCR_VSE);
+	pend_guest_serror(vcpu, ESR_ELx_ISV);
 }
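
For illustration only (not part of the patch): a minimal user-space sketch of
the ESR value pended above. ESR_ELx_ISV is bit 24 of the ESR; for SError
syndromes that same bit position is IDS ("imp-def syndrome"), so pending it
with ISS[23:0] all-zeros marks the error as imp-def rather than as an
uncategorized RAS error. The ESR_ELx_ISV define below mirrors the kernel's
value in asm/esr.h for a standalone build.

	#include <stdint.h>
	#include <stdio.h>

	/* Same bit position as the kernel's ESR_ELx_ISV in asm/esr.h */
	#define ESR_ELx_ISV	(UINT64_C(1) << 24)

	int main(void)
	{
		uint64_t vsesr = ESR_ELx_ISV;	/* IDS = 1, ISS[23:0] = 0 */

		printf("VSESR_EL2 = %#llx (IDS=%llu, ISS[23:0]=%#llx)\n",
		       (unsigned long long)vsesr,
		       (unsigned long long)((vsesr >> 24) & 1),
		       (unsigned long long)(vsesr & 0xffffff));
		return 0;
	}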