Commit 348c1681 authored by Marc Zyngier, committed by Wang ShaoBo

KVM: arm64: Rework SVE host-save/guest-restore

mainline inclusion
from mainline-v5.13-rc1~76^2
commit 52029198
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I5ITJT
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=52029198c1cec1e21513d74f87363a0408f28650

-------------------------------------------------

In order to keep the code readable, move the host-save/guest-restore
sequences into their own functions, with the following changes:
- the hypervisor ZCR is now set from C code
- ZCR_EL2 is always used as the EL2 accessor

This results in some minor assembler macro rework.
No functional change intended.
Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Wang ShaoBo <bobo.shaobowang@huawei.com>
Parent 87114dd6
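For orientation, the net effect of the patch on the FP/SVE trap handler is two symmetric save/restore branches. The condensed view below is editorial, assembled from the hunks that follow, not additional patch content:

	/* Host save, taken only while host FP state is still resident
	 * (guarded by KVM_ARM64_FP_HOST in __hyp_handle_fpsimd()). */
	if (sve_host)
		__hyp_sve_save_host(vcpu);
	else
		__fpsimd_save_state(vcpu->arch.host_fpsimd_state);

	vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;

	/* Guest restore; the SVE path also programs ZCR_EL2 from C. */
	if (sve_guest)
		__hyp_sve_restore_guest(vcpu);
	else
		__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);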
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -234,8 +234,7 @@
 	str		w\nxtmp, [\xpfpsr, #4]
 .endm
 
-.macro sve_load nxbase, xpfpsr, xvqminus1, nxtmp, xtmp2
-	sve_load_vq	\xvqminus1, x\nxtmp, \xtmp2
+.macro __sve_load nxbase, xpfpsr, nxtmp
 _for n, 0, 31,	_sve_ldr_v	\n, \nxbase, \n - 34
 	_sve_ldr_p	0, \nxbase
 	_sve_wrffr	0
@@ -246,3 +245,8 @@
 	ldr		w\nxtmp, [\xpfpsr, #4]
 	msr		fpcr, x\nxtmp
 .endm
+
+.macro sve_load nxbase, xpfpsr, xvqminus1, nxtmp, xtmp2
+	sve_load_vq	\xvqminus1, x\nxtmp, \xtmp2
+	__sve_load	\nxbase, \xpfpsr, \nxtmp
+.endm
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -90,7 +90,7 @@ void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
 void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
 void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
 void __sve_save_state(void *sve_pffr, u32 *fpsr);
-void __sve_restore_state(void *sve_pffr, u32 *fpsr, unsigned int vqminus1);
+void __sve_restore_state(void *sve_pffr, u32 *fpsr);
 
 #ifndef __KVM_NVHE_HYPERVISOR__
 void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
--- a/arch/arm64/kvm/hyp/fpsimd.S
+++ b/arch/arm64/kvm/hyp/fpsimd.S
@@ -21,7 +21,7 @@ SYM_FUNC_START(__fpsimd_restore_state)
 	ret
 SYM_FUNC_END(__fpsimd_restore_state)
 
 SYM_FUNC_START(__sve_restore_state)
-	sve_load 0, x1, x2, 3, x4
+	__sve_load 0, x1, 2
 	ret
 SYM_FUNC_END(__sve_restore_state)
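The operand change on the `__sve_restore_state` entry point (`sve_load 0, x1, x2, 3, x4` becoming `__sve_load 0, x1, 2`) follows directly from the new C prototype. An editorial annotation of the register mapping, assuming the standard AAPCS64 calling convention (this comment block is illustrative, not part of the patch):

	/*
	 * __sve_restore_state() argument registers under AAPCS64:
	 *   x0 = sve_pffr - SVE state buffer, as returned by sve_pffr()/vcpu_sve_pffr()
	 *   x1 = fpsr     - pointer to the saved FPSR (FPCR lives at fpsr + 4)
	 * The old third argument (vqminus1, passed in x2) is gone: the vector
	 * length is now programmed by the C caller, which frees x2 to act as
	 * __sve_load's scratch register - hence the bare "2" operand above.
	 */
	void __sve_restore_state(void *sve_pffr, u32 *fpsr);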
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -194,6 +194,24 @@ static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
 	return true;
 }
 
+static inline void __hyp_sve_save_host(struct kvm_vcpu *vcpu)
+{
+	struct thread_struct *thread;
+
+	thread = container_of(vcpu->arch.host_fpsimd_state, struct thread_struct,
+			      uw.fpsimd_state);
+
+	__sve_save_state(sve_pffr(thread), &vcpu->arch.host_fpsimd_state->fpsr);
+}
+
+static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
+{
+	sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
+	__sve_restore_state(vcpu_sve_pffr(vcpu),
+			    &vcpu->arch.ctxt.fp_regs.fpsr);
+	write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
+}
+
 /* Check for an FPSIMD/SVE trap and handle as appropriate */
 static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 {
@@ -250,28 +268,18 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 		 * In the SVE case, VHE is assumed: it is enforced by
 		 * Kconfig and kvm_arch_init().
 		 */
-		if (sve_host) {
-			struct thread_struct *thread = container_of(
-				vcpu->arch.host_fpsimd_state,
-				struct thread_struct, uw.fpsimd_state);
-
-			__sve_save_state(sve_pffr(thread),
-					 &vcpu->arch.host_fpsimd_state->fpsr);
-		} else {
+		if (sve_host)
+			__hyp_sve_save_host(vcpu);
+		else
 			__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
-		}
 
 		vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
 	}
 
-	if (sve_guest) {
-		__sve_restore_state(vcpu_sve_pffr(vcpu),
-				    &vcpu->arch.ctxt.fp_regs.fpsr,
-				    vcpu_sve_max_vq(vcpu) - 1);
-		write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
-	} else {
+	if (sve_guest)
+		__hyp_sve_restore_guest(vcpu);
+	else
 		__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);
-	}
 
 	/* Skip restoring fpexc32 for AArch64 guests */
 	if (!(read_sysreg(hcr_el2) & HCR_RW))
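On "the hypervisor ZCR is now set from C code": __hyp_sve_restore_guest() goes through sve_cond_update_zcr_vq(), which rewrites only the vector-length field of ZCR_EL2 and skips the sysreg write entirely when the value would not change. As a reference point, the helper's mainline definition in arch/arm64/include/asm/fpsimd.h is, to the best of our knowledge, along these lines; treat this as a sketch and defer to the tree itself:

	/*
	 * Sketch of sve_cond_update_zcr_vq(): update only the ZCR_ELx.LEN
	 * field, and avoid the write when the register already holds the
	 * wanted value.
	 */
	#define sve_cond_update_zcr_vq(val, reg)		\
		do {						\
			u64 __zcr = read_sysreg_s((reg));	\
			u64 __new = __zcr & ~ZCR_ELx_LEN_MASK;	\
			__new |= (val) & ZCR_ELx_LEN_MASK;	\
			if (__zcr != __new)			\
				write_sysreg_s(__new, (reg));	\
		} while (0)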