提交 42c99fb8 编写于 作者: M Marc Zyngier 提交者: Wang ShaoBo

KVM: arm64: Provide KVM's own save/restore SVE primitives

mainline inclusion
from mainline-v5.13-rc1~76^2
commit 297b8603
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I5ITJT
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=297b8603e356ad82c1345cc75fad4d89310a3c34

-------------------------------------------------

As we are about to change the way KVM deals with SVE, provide
KVM with its own save/restore SVE primitives.

No functional change intended.
Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Wang ShaoBo <bobo.shaobowang@huawei.com>
上级 b4f1cf06
...@@ -6,6 +6,8 @@ ...@@ -6,6 +6,8 @@
* Author: Catalin Marinas <catalin.marinas@arm.com> * Author: Catalin Marinas <catalin.marinas@arm.com>
*/ */
#include <asm/assembler.h>
.macro fpsimd_save state, tmpnr .macro fpsimd_save state, tmpnr
stp q0, q1, [\state, #16 * 0] stp q0, q1, [\state, #16 * 0]
stp q2, q3, [\state, #16 * 2] stp q2, q3, [\state, #16 * 2]
......
...@@ -89,6 +89,8 @@ void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu); ...@@ -89,6 +89,8 @@ void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
void __fpsimd_save_state(struct user_fpsimd_state *fp_regs); void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs); void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
void __sve_save_state(void *sve_pffr, u32 *fpsr);
void __sve_restore_state(void *sve_pffr, u32 *fpsr, unsigned int vqminus1);
#ifndef __KVM_NVHE_HYPERVISOR__ #ifndef __KVM_NVHE_HYPERVISOR__
void activate_traps_vhe_load(struct kvm_vcpu *vcpu); void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
......
...@@ -19,3 +19,13 @@ SYM_FUNC_START(__fpsimd_restore_state) ...@@ -19,3 +19,13 @@ SYM_FUNC_START(__fpsimd_restore_state)
fpsimd_restore x0, 1 fpsimd_restore x0, 1
ret ret
SYM_FUNC_END(__fpsimd_restore_state) SYM_FUNC_END(__fpsimd_restore_state)
// Restore SVE state from memory, for use by KVM's hyp code.
// Per the C prototype (__sve_restore_state(void *sve_pffr, u32 *fpsr,
// unsigned int vqminus1)) and AAPCS64:
//   x0 = sve_pffr : base of the Z/P/FFR register save area
//   x1 = fpsr     : pointer to the saved FPSR value
//   x2 = vqminus1 : vector quadwords minus 1 (callers pass
//                   sve_vq_from_vl(sve_max_vl) - 1)
// sve_load is the arch macro pulled in via <asm/assembler.h>; the trailing
// operands (3, x4) are scratch/temp-register arguments — NOTE(review):
// confirm exact operand meaning against fpsimdmacros.h.
SYM_FUNC_START(__sve_restore_state)
sve_load 0, x1, x2, 3, x4
ret
SYM_FUNC_END(__sve_restore_state)
// Save SVE state to memory, for use by KVM's hyp code.
// Per the C prototype (__sve_save_state(void *sve_pffr, u32 *fpsr)) and
// AAPCS64:
//   x0 = sve_pffr : base of the Z/P/FFR register save area
//   x1 = fpsr     : pointer to where FPSR should be stored
// sve_save is the arch macro pulled in via <asm/assembler.h>; the trailing
// operand (2) is a scratch-register argument — NOTE(review): confirm
// against fpsimdmacros.h.
SYM_FUNC_START(__sve_save_state)
sve_save 0, x1, 2
ret
SYM_FUNC_END(__sve_save_state)
...@@ -255,7 +255,7 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu) ...@@ -255,7 +255,7 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
vcpu->arch.host_fpsimd_state, vcpu->arch.host_fpsimd_state,
struct thread_struct, uw.fpsimd_state); struct thread_struct, uw.fpsimd_state);
sve_save_state(sve_pffr(thread), __sve_save_state(sve_pffr(thread),
&vcpu->arch.host_fpsimd_state->fpsr); &vcpu->arch.host_fpsimd_state->fpsr);
} else { } else {
__fpsimd_save_state(vcpu->arch.host_fpsimd_state); __fpsimd_save_state(vcpu->arch.host_fpsimd_state);
...@@ -265,7 +265,7 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu) ...@@ -265,7 +265,7 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
} }
if (sve_guest) { if (sve_guest) {
sve_load_state(vcpu_sve_pffr(vcpu), __sve_restore_state(vcpu_sve_pffr(vcpu),
&vcpu->arch.ctxt.fp_regs.fpsr, &vcpu->arch.ctxt.fp_regs.fpsr,
sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1); sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
write_sysreg_s(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR_EL12); write_sysreg_s(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR_EL12);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册