From 5b727b32d4542491790035d4e77a1fa0f57e05f8 Mon Sep 17 00:00:00 2001
From: Zengruan Ye <yezengruan@huawei.com>
Date: Thu, 22 Apr 2021 20:15:52 +0800
Subject: [PATCH] KVM: arm64: Support pvsched preempted via shared structure

euleros inclusion
category: feature
bugzilla: https://bugzilla.openeuler.org/show_bug.cgi?id=35
CVE: NA

--------------------------------

Implement the service call for configuring a shared structure between a
vCPU and the hypervisor, in which the hypervisor can tell whether the
vCPU is running or not.

Signed-off-by: Zengruan Ye <yezengruan@huawei.com>
Reviewed-by: zhanghailiang <zhang.zhanghailiang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Chaochao Xing <xingchaochao@huawei.com>
Reviewed-by: Zengruan Ye <yezengruan@huawei.com>
Reviewed-by: Xiangyou Xie <xiexiangyou@huawei.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
---
 arch/arm/include/asm/kvm_host.h   | 14 ++++++++++++++
 arch/arm64/include/asm/kvm_host.h | 16 ++++++++++++++++
 include/linux/kvm_types.h         |  2 ++
 virt/kvm/arm/arm.c                |  8 ++++++++
 virt/kvm/arm/hypercalls.c         | 12 ++++++++++++
 virt/kvm/arm/pvsched.c            | 32 +++++++++++++++++++++++++++++++
 6 files changed, 84 insertions(+)

diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 87fce10dfc4c..0066de61f4c6 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -303,6 +303,20 @@ static inline int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
 int kvm_perf_init(void);
 int kvm_perf_teardown(void);
 
+static inline void kvm_arm_pvsched_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
+{
+}
+
+static inline bool kvm_arm_is_pvsched_enabled(struct kvm_vcpu_arch *vcpu_arch)
+{
+	return false;
+}
+
+static inline void kvm_update_pvsched_preempted(struct kvm_vcpu *vcpu,
+						u32 preempted)
+{
+}
+
 static inline int kvm_hypercall_pvsched_features(struct kvm_vcpu *vcpu)
 {
 	return SMCCC_RET_NOT_SUPPORTED;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 2ab9cb5b5742..556351524748 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -325,6 +325,11 @@ struct kvm_vcpu_arch {
 	 * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs.
 	 */
 	bool sysregs_loaded_on_cpu;
 
+	/* Guest PV sched state */
+	struct {
+		gpa_t base;
+	} pvsched;
+
 	struct id_registers idregs;
 };
 
@@ -430,6 +435,17 @@ void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
 int kvm_perf_init(void);
 int kvm_perf_teardown(void);
 
+static inline void kvm_arm_pvsched_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
+{
+	vcpu_arch->pvsched.base = GPA_INVALID;
+}
+
+static inline bool kvm_arm_is_pvsched_enabled(struct kvm_vcpu_arch *vcpu_arch)
+{
+	return (vcpu_arch->pvsched.base != GPA_INVALID);
+}
+
+void kvm_update_pvsched_preempted(struct kvm_vcpu *vcpu, u32 preempted);
 int kvm_hypercall_pvsched_features(struct kvm_vcpu *vcpu);
 
 void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 8bf259dae9f6..e66a9c19f71c 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -49,6 +49,8 @@ typedef unsigned long gva_t;
 typedef u64 gpa_t;
 typedef u64 gfn_t;
 
+#define GPA_INVALID	(~(gpa_t)0)
+
 typedef unsigned long hva_t;
 typedef u64 hpa_t;
 typedef u64 hfn_t;
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 9b8fe62e67a1..cdfe28311f41 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -396,6 +396,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 
 	kvm_arm_reset_debug_ptr(vcpu);
 
+	kvm_arm_pvsched_vcpu_init(&vcpu->arch);
+
 	return kvm_vgic_vcpu_init(vcpu);
 }
 
@@ -446,6 +448,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	vcpu_set_wfe_traps(vcpu);
 
 	update_steal_time(vcpu);
+
+	if (kvm_arm_is_pvsched_enabled(&vcpu->arch))
+		kvm_update_pvsched_preempted(vcpu, 0);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -459,6 +464,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	vcpu->cpu = -1;
 
 	kvm_arm_set_running_vcpu(NULL);
+
+	if (kvm_arm_is_pvsched_enabled(&vcpu->arch))
+		kvm_update_pvsched_preempted(vcpu, 1);
 }
 
 static void vcpu_power_off(struct kvm_vcpu *vcpu)
diff --git a/virt/kvm/arm/hypercalls.c b/virt/kvm/arm/hypercalls.c
index 214b8bb0d3f9..20cc3c94260d 100644
--- a/virt/kvm/arm/hypercalls.c
+++ b/virt/kvm/arm/hypercalls.c
@@ -14,6 +14,7 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
 	u32 func_id = smccc_get_function(vcpu);
 	u32 val = SMCCC_RET_NOT_SUPPORTED;
 	u32 feature;
+	gpa_t gpa;
 
 	switch (func_id) {
 	case ARM_SMCCC_VERSION_FUNC_ID:
@@ -48,6 +49,17 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
 	case ARM_SMCCC_HV_PV_SCHED_FEATURES:
 		val = kvm_hypercall_pvsched_features(vcpu);
 		break;
+	case ARM_SMCCC_HV_PV_SCHED_IPA_INIT:
+		gpa = smccc_get_arg1(vcpu);
+		if (gpa != GPA_INVALID) {
+			vcpu->arch.pvsched.base = gpa;
+			val = SMCCC_RET_SUCCESS;
+		}
+		break;
+	case ARM_SMCCC_HV_PV_SCHED_IPA_RELEASE:
+		vcpu->arch.pvsched.base = GPA_INVALID;
+		val = SMCCC_RET_SUCCESS;
+		break;
 	default:
 		return kvm_psci_call(vcpu);
 	}
diff --git a/virt/kvm/arm/pvsched.c b/virt/kvm/arm/pvsched.c
index 40b56e01fc5d..8a1302a52464 100644
--- a/virt/kvm/arm/pvsched.c
+++ b/virt/kvm/arm/pvsched.c
@@ -5,9 +5,39 @@
  */
 
 #include <linux/arm-smccc.h>
+#include <linux/kvm_host.h>
+
+#include <asm/pvsched-abi.h>
 
 #include <kvm/arm_hypercalls.h>
 
+void kvm_update_pvsched_preempted(struct kvm_vcpu *vcpu, u32 preempted)
+{
+	__le32 preempted_le;
+	u64 offset;
+	int idx;
+	u64 base = vcpu->arch.pvsched.base;
+	struct kvm *kvm = vcpu->kvm;
+
+	if (base == GPA_INVALID)
+		return;
+
+	preempted_le = cpu_to_le32(preempted);
+
+	/*
+	 * This function is called from atomic context, so we need to
+	 * disable page faults.
+	 */
+	pagefault_disable();
+
+	idx = srcu_read_lock(&kvm->srcu);
+	offset = offsetof(struct pvsched_vcpu_state, preempted);
+	kvm_put_guest(kvm, base + offset, preempted_le, u32);
+	srcu_read_unlock(&kvm->srcu, idx);
+
+	pagefault_enable();
+}
+
 int kvm_hypercall_pvsched_features(struct kvm_vcpu *vcpu)
 {
 	u32 feature = smccc_get_arg1(vcpu);
@@ -15,6 +45,8 @@ int kvm_hypercall_pvsched_features(struct kvm_vcpu *vcpu)
 
 	switch (feature) {
 	case ARM_SMCCC_HV_PV_SCHED_FEATURES:
+	case ARM_SMCCC_HV_PV_SCHED_IPA_INIT:
+	case ARM_SMCCC_HV_PV_SCHED_IPA_RELEASE:
 		val = SMCCC_RET_SUCCESS;
 		break;
 	}
-- 
GitLab
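
The patch above covers only the hypervisor side. For context, below is a
minimal sketch of the guest-side registration that the IPA_INIT/IPA_RELEASE
calls are designed for. It is hedged, not part of this patch: the
ARM_SMCCC_HV_PV_SCHED_* function IDs are assumed to come from an earlier
patch in this series, struct pvsched_vcpu_state is reduced here to the one
'preempted' field the host actually writes (real ABI padding/alignment may
differ), and the names pvsched_state, pvsched_vcpu_state_init and
pvsched_vcpu_state_release are illustrative. The HVC conduit matches
kvm_hvc_call_handler() on the host side.

/* Hypothetical guest-side registration sketch -- NOT part of this patch. */
#include <linux/arm-smccc.h>
#include <linux/errno.h>
#include <linux/percpu.h>

/* Assumed shared layout: only 'preempted' is known from the patch. */
struct pvsched_vcpu_state {
	__le32 preempted;	/* 0: vCPU scheduled in, 1: scheduled out */
};

static DEFINE_PER_CPU(struct pvsched_vcpu_state, pvsched_state);

/* Run on the vCPU being registered, e.g. from a cpuhp online callback. */
static int pvsched_vcpu_state_init(void)
{
	struct arm_smccc_res res;
	phys_addr_t base = per_cpu_ptr_to_phys(this_cpu_ptr(&pvsched_state));

	/* Pass the structure's IPA; the host stores it in pvsched.base. */
	arm_smccc_1_1_hvc(ARM_SMCCC_HV_PV_SCHED_IPA_INIT, base, &res);

	return res.a0 == SMCCC_RET_SUCCESS ? 0 : -EINVAL;
}

/* Unregister, e.g. on CPU offline; host resets pvsched.base to GPA_INVALID. */
static void pvsched_vcpu_state_release(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_hvc(ARM_SMCCC_HV_PV_SCHED_IPA_RELEASE, &res);
}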
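
And the consumer side, reusing pvsched_state from the sketch above: since
kvm_arch_vcpu_load() writes preempted = 0 and kvm_arch_vcpu_put() writes
preempted = 1, a guest vcpu_is_preempted()-style check reduces to a single
read. Wiring this into the paravirt ops machinery is omitted; the helper
name is again illustrative.

/*
 * Hypothetical guest-side check -- NOT part of this patch. A non-zero
 * 'preempted' means the host last ran kvm_arch_vcpu_put() for this vCPU,
 * i.e. the vCPU backing 'cpu' is currently scheduled out.
 */
static bool pvsched_vcpu_is_preempted(int cpu)
{
	struct pvsched_vcpu_state *st = per_cpu_ptr(&pvsched_state, cpu);

	return !!le32_to_cpu(READ_ONCE(st->preempted));
}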