diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index a391a61b1033c38ac6dbb731d802788ea36a5f6f..756d0d614993abb7ec1d85f5d42675dcae795dad 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -54,12 +54,19 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static bool core_reg_offset_is_vreg(u64 off)
+{
+	return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
+		off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
+}
+
 static u64 core_reg_offset_from_id(u64 id)
 {
 	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
 }
 
-static int validate_core_offset(const struct kvm_one_reg *reg)
+static int validate_core_offset(const struct kvm_vcpu *vcpu,
+				const struct kvm_one_reg *reg)
 {
 	u64 off = core_reg_offset_from_id(reg->id);
 	int size;
@@ -91,11 +98,19 @@ static int validate_core_offset(const struct kvm_one_reg *reg)
 		return -EINVAL;
 	}
 
-	if (KVM_REG_SIZE(reg->id) == size &&
-	    IS_ALIGNED(off, size / sizeof(__u32)))
-		return 0;
+	if (KVM_REG_SIZE(reg->id) != size ||
+	    !IS_ALIGNED(off, size / sizeof(__u32)))
+		return -EINVAL;
 
-	return -EINVAL;
+	/*
+	 * The KVM_REG_ARM64_SVE regs must be used instead of
+	 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
+	 * SVE-enabled vcpus:
+	 */
+	if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
+		return -EINVAL;
+
+	return 0;
 }
 
 static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
@@ -117,7 +132,7 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
 		return -ENOENT;
 
-	if (validate_core_offset(reg))
+	if (validate_core_offset(vcpu, reg))
 		return -EINVAL;
 
 	if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
@@ -142,7 +157,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
 		return -ENOENT;
 
-	if (validate_core_offset(reg))
+	if (validate_core_offset(vcpu, reg))
 		return -EINVAL;
 
 	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
@@ -195,13 +210,22 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	return -EINVAL;
 }
 
-static int kvm_arm_copy_core_reg_indices(u64 __user *uindices)
+static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
+				 u64 __user *uindices)
 {
 	unsigned int i;
 	int n = 0;
 	const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
 
 	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
+		/*
+		 * The KVM_REG_ARM64_SVE regs must be used instead of
+		 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
+		 * SVE-enabled vcpus:
+		 */
+		if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(i))
+			continue;
+
 		if (uindices) {
 			if (put_user(core_reg | i, uindices))
 				return -EFAULT;
@@ -214,9 +238,9 @@ static int kvm_arm_copy_core_reg_indices(u64 __user *uindices)
 	return n;
 }
 
-static unsigned long num_core_regs(void)
+static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
 {
-	return kvm_arm_copy_core_reg_indices(NULL);
+	return copy_core_reg_indices(vcpu, NULL);
 }
 
 /**
@@ -281,7 +305,7 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
 {
 	unsigned long res = 0;
 
-	res += num_core_regs();
+	res += num_core_regs(vcpu);
 	res += kvm_arm_num_sys_reg_descs(vcpu);
 	res += kvm_arm_get_fw_num_regs(vcpu);
 	res += NUM_TIMER_REGS;
@@ -298,7 +322,7 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
 {
 	int ret;
 
-	ret = kvm_arm_copy_core_reg_indices(uindices);
+	ret = copy_core_reg_indices(vcpu, uindices);
 	if (ret)
 		return ret;
 	uindices += ret;
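
For reference, a minimal userspace sketch of the ABI effect of this change: once a vcpu has SVE enabled, KVM_GET_ONE_REG on a KVM_REG_ARM_CORE ID that falls inside fp_regs.vregs[] fails with EINVAL (and those IDs are no longer reported by KVM_GET_REG_LIST), while non-SVE vcpus behave as before. The helper name read_core_vreg0() and the already-initialised vcpu_fd are illustrative assumptions, not part of this patch; the usual /dev/kvm, KVM_CREATE_VM, KVM_CREATE_VCPU and KVM_ARM_VCPU_INIT setup is omitted.

/*
 * Hypothetical probe, not part of this patch: try to read V0 through
 * the legacy KVM_REG_ARM_CORE view.  vcpu_fd is assumed to be a vcpu
 * created and initialised by the caller (with the SVE feature enabled
 * when exercising the new rejection path).
 */
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

#include <linux/kvm.h>	/* pulls in the arm64 <asm/kvm.h> definitions */

static int read_core_vreg0(int vcpu_fd)
{
	__uint128_t val;
	struct kvm_one_reg reg = {
		/* 128-bit core register ID for kvm_regs.fp_regs.vregs[0] */
		.id   = KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE |
			KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]),
		.addr = (uint64_t)&val,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0) {
		/* Expected to fail with EINVAL on an SVE-enabled vcpu. */
		fprintf(stderr, "KVM_GET_ONE_REG(V0): %s\n", strerror(errno));
		return -1;
	}

	return 0;	/* Non-SVE vcpus still succeed. */
}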