Commit 539aee0e, authored by James Morse, committed by Marc Zyngier

KVM: arm64: Share the parts of get/set events useful to 32bit

The get/set events helpers do some work to check that the reserved
and padding fields are zero. This is useful on 32bit too.

Move this code into virt/kvm/arm/arm.c, and give the arch
code some underscores.

This is temporarily hidden behind __KVM_HAVE_VCPU_EVENTS until
32bit is wired up.
Signed-off-by: James Morse <james.morse@arm.com>
Reviewed-by: Dongjiu Geng <gengdongjiu@huawei.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Parent: be26b3a7
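Before the diff itself, a note on what the shared checks mean for userspace: the reserved/padding validation that this patch moves into common code is what keeps the ioctl ABI strict, so a VMM must clear the whole struct before calling KVM_SET_VCPU_EVENTS. Below is a minimal, hypothetical userspace sketch (not part of this patch; inject_serror() is an illustrative name and vcpu_fd is assumed to be a vCPU file descriptor obtained from KVM_CREATE_VCPU):

/*
 * Hypothetical sketch: inject a virtual SError into a vCPU.
 * reserved[] and exception.pad[] must be zero or the ioctl returns -EINVAL,
 * which is exactly the check being shared with 32bit in this patch.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int inject_serror(int vcpu_fd)
{
        struct kvm_vcpu_events events;

        memset(&events, 0, sizeof(events));     /* zero reserved/pad fields */
        events.exception.serror_pending = 1;    /* request a pending SError */

        return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
}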
arch/arm64/include/asm/kvm_host.h
@@ -350,11 +350,11 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
 int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
 int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
 int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
-int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
-                            struct kvm_vcpu_events *events);
+int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
+                              struct kvm_vcpu_events *events);
 
-int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
-                            struct kvm_vcpu_events *events);
+int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
+                              struct kvm_vcpu_events *events);
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
arch/arm64/kvm/guest.c
@@ -289,11 +289,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	return -EINVAL;
 }
 
-int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
-                            struct kvm_vcpu_events *events)
+int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
+                              struct kvm_vcpu_events *events)
 {
-	memset(events, 0, sizeof(*events));
-
 	events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
 	events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
@@ -303,23 +301,12 @@ int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
-                            struct kvm_vcpu_events *events)
+int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
+                              struct kvm_vcpu_events *events)
 {
-	int i;
 	bool serror_pending = events->exception.serror_pending;
 	bool has_esr = events->exception.serror_has_esr;
 
-	/* check whether the reserved field is zero */
-	for (i = 0; i < ARRAY_SIZE(events->reserved); i++)
-		if (events->reserved[i])
-			return -EINVAL;
-
-	/* check whether the pad field is zero */
-	for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++)
-		if (events->exception.pad[i])
-			return -EINVAL;
-
 	if (serror_pending && has_esr) {
 		if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
 			return -EINVAL;
virt/kvm/arm/arm.c
@@ -1050,6 +1050,34 @@ static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
 	return ret;
 }
 
+#ifdef __KVM_HAVE_VCPU_EVENTS /* temporary: until 32bit is wired up */
+static int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
+                                   struct kvm_vcpu_events *events)
+{
+	memset(events, 0, sizeof(*events));
+
+	return __kvm_arm_vcpu_get_events(vcpu, events);
+}
+
+static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
+                                   struct kvm_vcpu_events *events)
+{
+	int i;
+
+	/* check whether the reserved field is zero */
+	for (i = 0; i < ARRAY_SIZE(events->reserved); i++)
+		if (events->reserved[i])
+			return -EINVAL;
+
+	/* check whether the pad field is zero */
+	for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++)
+		if (events->exception.pad[i])
+			return -EINVAL;
+
+	return __kvm_arm_vcpu_set_events(vcpu, events);
+}
+#endif /* __KVM_HAVE_VCPU_EVENTS */
+
 long kvm_arch_vcpu_ioctl(struct file *filp,
                          unsigned int ioctl, unsigned long arg)
 {
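As a counterpart to the injection example above, a hypothetical sketch of the read side (again not part of this patch; dump_serror_state() and vcpu_fd are illustrative names). Because the new kvm_arm_vcpu_get_events() wrapper memset()s the struct before calling the arch helper, reserved[] and exception.pad[] always read back as zero:

/*
 * Hypothetical sketch: read pending SError state back from a vCPU.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int dump_serror_state(int vcpu_fd)
{
        struct kvm_vcpu_events events;

        /* the common wrapper zeroes the struct, so no memset is needed here */
        if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
                return -1;

        printf("serror_pending=%u has_esr=%u esr=0x%llx\n",
               (unsigned)events.exception.serror_pending,
               (unsigned)events.exception.serror_has_esr,
               (unsigned long long)events.exception.serror_esr);
        return 0;
}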