commit 468f3477 authored by Marc Zyngier

KVM: arm64: Introduce vcpu_sve_vq() helper

The KVM code contains a number of "sve_vq_from_vl(vcpu->arch.sve_max_vl)"
instances, and we are about to add more.

Introduce vcpu_sve_vq() as a shorthand for this expression.

Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
parent 985d3a1b
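
For context, the helper is only a shorthand for the existing VL-to-VQ conversion: sve_max_vl holds the guest's maximum SVE vector length in bytes, and the VQ is that length expressed in 128-bit granules. The standalone sketch below spells this out; the SVE_VQ_BYTES value and the sve_vq_from_vl() definition mirror the kernel's, but the cut-down struct kvm_vcpu and the main() driver are illustrative stand-ins, not kernel code.

#include <stdio.h>

/* One SVE vector quadword (VQ) is 128 bits = 16 bytes, as in the kernel UAPI. */
#define SVE_VQ_BYTES 16

/* VL (bytes) -> VQ (number of 128-bit granules); mirrors the kernel helper. */
#define sve_vq_from_vl(vl) ((vl) / SVE_VQ_BYTES)

/* Cut-down stand-in for the real struct kvm_vcpu, for illustration only. */
struct kvm_vcpu {
	struct {
		unsigned int sve_max_vl;	/* guest's maximum vector length, in bytes */
	} arch;
};

/* The shorthand added to kvm_host.h by this patch, spelled out. */
#define vcpu_sve_max_vq(vcpu) sve_vq_from_vl((vcpu)->arch.sve_max_vl)

int main(void)
{
	/* A 2048-bit (256-byte) maximum vector length ... */
	struct kvm_vcpu vcpu = { .arch = { .sve_max_vl = 256 } };

	/* ... corresponds to 16 quadwords. */
	printf("max VQ = %u\n", vcpu_sve_max_vq(&vcpu));
	return 0;
}
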
@@ -375,6 +375,8 @@ struct kvm_vcpu_arch {
 #define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +     \
                              sve_ffr_offset((vcpu)->arch.sve_max_vl))
 
+#define vcpu_sve_max_vq(vcpu) sve_vq_from_vl((vcpu)->arch.sve_max_vl)
+
 #define vcpu_sve_state_size(vcpu) ({                                   \
         size_t __size_ret;                                             \
         unsigned int __vcpu_vq;                                        \
@@ -382,7 +384,7 @@ struct kvm_vcpu_arch {
         if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {         \
                 __size_ret = 0;                                        \
         } else {                                                       \
-                __vcpu_vq = sve_vq_from_vl((vcpu)->arch.sve_max_vl);   \
+                __vcpu_vq = vcpu_sve_max_vq(vcpu);                     \
                 __size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);             \
         }                                                              \
                                                                        \

@@ -299,7 +299,7 @@ static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
         memset(vqs, 0, sizeof(vqs));
 
-        max_vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
+        max_vq = vcpu_sve_max_vq(vcpu);
         for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
                 if (sve_vq_available(vq))
                         vqs[vq_word(vq)] |= vq_mask(vq);
@@ -427,7 +427,7 @@ static int sve_reg_to_region(struct sve_state_reg_region *region,
         if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
                 return -ENOENT;
 
-        vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
+        vq = vcpu_sve_max_vq(vcpu);
 
         reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
                         SVE_SIG_REGS_OFFSET;
@@ -437,7 +437,7 @@ static int sve_reg_to_region(struct sve_state_reg_region *region,
         if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
                 return -ENOENT;
 
-        vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
+        vq = vcpu_sve_max_vq(vcpu);
 
         reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
                         SVE_SIG_REGS_OFFSET;

@@ -268,7 +268,7 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
         if (sve_guest) {
                 __sve_restore_state(vcpu_sve_pffr(vcpu),
                                     &vcpu->arch.ctxt.fp_regs.fpsr,
-                                    sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
+                                    vcpu_sve_vq(vcpu) - 1);
                 write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
         } else {
                 __fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);