Commit 8c86dfe3 authored by Dave Martin, committed by Marc Zyngier

KVM: arm64: Reject ioctl access to FPSIMD V-regs on SVE vcpus

In order to avoid the pointless complexity of maintaining two ioctl
register access views of the same data, this patch blocks ioctl
access to the FPSIMD V-registers on vcpus that support SVE.

This will make it more straightforward to add SVE register access
support.

Since SVE is an opt-in feature for userspace, this will not affect
existing users.
Signed-off-by: Dave Martin <Dave.Martin@arm.com>
Reviewed-by: Julien Thierry <julien.thierry@arm.com>
Tested-by: zhang.lei <zhang.lei@jp.fujitsu.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Parent be25bbb3
@@ -54,12 +54,19 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static bool core_reg_offset_is_vreg(u64 off)
+{
+	return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
+		off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
+}
+
 static u64 core_reg_offset_from_id(u64 id)
 {
 	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
 }
 
-static int validate_core_offset(const struct kvm_one_reg *reg)
+static int validate_core_offset(const struct kvm_vcpu *vcpu,
+				const struct kvm_one_reg *reg)
 {
 	u64 off = core_reg_offset_from_id(reg->id);
 	int size;
@@ -91,11 +98,19 @@ static int validate_core_offset(const struct kvm_one_reg *reg)
 		return -EINVAL;
 	}
 
-	if (KVM_REG_SIZE(reg->id) == size &&
-	    IS_ALIGNED(off, size / sizeof(__u32)))
-		return 0;
+	if (KVM_REG_SIZE(reg->id) != size ||
+	    !IS_ALIGNED(off, size / sizeof(__u32)))
+		return -EINVAL;
 
-	return -EINVAL;
+	/*
+	 * The KVM_REG_ARM64_SVE regs must be used instead of
+	 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
+	 * SVE-enabled vcpus:
+	 */
+	if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
+		return -EINVAL;
+
+	return 0;
 }
 
 static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
@@ -117,7 +132,7 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
 		return -ENOENT;
 
-	if (validate_core_offset(reg))
+	if (validate_core_offset(vcpu, reg))
 		return -EINVAL;
 
 	if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
@@ -142,7 +157,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
 		return -ENOENT;
 
-	if (validate_core_offset(reg))
+	if (validate_core_offset(vcpu, reg))
 		return -EINVAL;
 
 	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
@@ -195,13 +210,22 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	return -EINVAL;
 }
 
-static int kvm_arm_copy_core_reg_indices(u64 __user *uindices)
+static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
+				 u64 __user *uindices)
 {
 	unsigned int i;
 	int n = 0;
 	const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
 
 	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
+		/*
+		 * The KVM_REG_ARM64_SVE regs must be used instead of
+		 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
+		 * SVE-enabled vcpus:
+		 */
+		if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(i))
+			continue;
+
 		if (uindices) {
 			if (put_user(core_reg | i, uindices))
 				return -EFAULT;
@@ -214,9 +238,9 @@ static int kvm_arm_copy_core_reg_indices(u64 __user *uindices)
 	return n;
 }
 
-static unsigned long num_core_regs(void)
+static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
 {
-	return kvm_arm_copy_core_reg_indices(NULL);
+	return copy_core_reg_indices(vcpu, NULL);
 }
 
 /**
@@ -281,7 +305,7 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
 {
 	unsigned long res = 0;
 
-	res += num_core_regs();
+	res += num_core_regs(vcpu);
 	res += kvm_arm_num_sys_reg_descs(vcpu);
 	res += kvm_arm_get_fw_num_regs(vcpu);
 	res += NUM_TIMER_REGS;
@@ -298,7 +322,7 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
 {
 	int ret;
 
-	ret = kvm_arm_copy_core_reg_indices(uindices);
+	ret = copy_core_reg_indices(vcpu, uindices);
 	if (ret)
 		return ret;
 	uindices += ret;
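For illustration, the userspace-visible effect: a KVM_REG_ARM_CORE register ID encodes the register's offset into struct kvm_regs in 32-bit words, and the patch both filters the V-register offsets out of KVM_GET_REG_LIST and rejects them in KVM_GET_ONE_REG / KVM_SET_ONE_REG. Below is a minimal, hypothetical sketch (not part of the patch; the function name try_read_v0 and the vcpu_fd setup are assumptions, and an arm64 host is assumed so that <linux/kvm.h> pulls in the arm64 core-reg macros) of an access that now fails on an SVE-enabled vcpu:

#include <errno.h>
#include <stddef.h>	/* offsetof, used by KVM_REG_ARM_CORE_REG() */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Hypothetical illustration: read FPSIMD V0 through the core-reg view.
 * On a vcpu initialized with SVE, this ioctl is expected to fail with
 * EINVAL after this patch; on a non-SVE vcpu it still succeeds.
 */
static int try_read_v0(int vcpu_fd)
{
	__uint128_t val;
	struct kvm_one_reg reg = {
		/* vregs[] entries are 128 bits wide, hence SIZE_U128 */
		.id   = KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE |
			KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]),
		.addr = (__u64)(unsigned long)&val,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0) {
		perror("KVM_GET_ONE_REG");
		return -errno;
	}
	return 0;
}

Since the same vcpu_has_sve() filter is applied in copy_core_reg_indices(), well-behaved userspace that enumerates registers via KVM_GET_REG_LIST before accessing them never sees the blocked IDs in the first place.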