Commit 5b727b32 authored by Zengruan Ye, committed by Cheng Jian

KVM: arm64: Support pvsched preempted via shared structure

euleros inclusion
category: feature
bugzilla: https://bugzilla.openeuler.org/show_bug.cgi?id=35
CVE: NA

--------------------------------

Implement the service call for configuring a shared structure between a
vCPU and the hypervisor, through which the hypervisor can tell the vCPU
whether it is running or not.
Signed-off-by: Zengruan Ye <yezengruan@huawei.com>
Reviewed-by: zhanghailiang <zhang.zhanghailiang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Chaochao Xing <xingchaochao@huawei.com>
Reviewed-by: Zengruan Ye <yezengruan@huawei.com>
Reviewed-by: Xiangyou Xie <xiexiangyou@huawei.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
Parent bbcd7df5
@@ -303,6 +303,20 @@ static inline int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
 int kvm_perf_init(void);
 int kvm_perf_teardown(void);
 
+static inline void kvm_arm_pvsched_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
+{
+}
+
+static inline bool kvm_arm_is_pvsched_enabled(struct kvm_vcpu_arch *vcpu_arch)
+{
+        return false;
+}
+
+static inline void kvm_update_pvsched_preempted(struct kvm_vcpu *vcpu,
+                                                u32 preempted)
+{
+}
+
+static inline int kvm_hypercall_pvsched_features(struct kvm_vcpu *vcpu)
+{
+        return SMCCC_RET_NOT_SUPPORTED;
......
@@ -325,6 +325,11 @@ struct kvm_vcpu_arch {
          * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */
         bool sysregs_loaded_on_cpu;
 
+        /* Guest PV sched state */
+        struct {
+                gpa_t base;
+        } pvsched;
+
         struct id_registers idregs;
 };
@@ -430,6 +435,17 @@ void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
 int kvm_perf_init(void);
 int kvm_perf_teardown(void);
 
+static inline void kvm_arm_pvsched_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
+{
+        vcpu_arch->pvsched.base = GPA_INVALID;
+}
+
+static inline bool kvm_arm_is_pvsched_enabled(struct kvm_vcpu_arch *vcpu_arch)
+{
+        return (vcpu_arch->pvsched.base != GPA_INVALID);
+}
+
+void kvm_update_pvsched_preempted(struct kvm_vcpu *vcpu, u32 preempted);
+int kvm_hypercall_pvsched_features(struct kvm_vcpu *vcpu);
+
 void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
......
@@ -49,6 +49,8 @@ typedef unsigned long gva_t;
 typedef u64 gpa_t;
 typedef u64 gfn_t;
 
+#define GPA_INVALID (~(gpa_t)0)
+
 typedef unsigned long hva_t;
 typedef u64 hpa_t;
 typedef u64 hfn_t;
......
@@ -396,6 +396,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
         kvm_arm_reset_debug_ptr(vcpu);
 
+        kvm_arm_pvsched_vcpu_init(&vcpu->arch);
+
         return kvm_vgic_vcpu_init(vcpu);
 }
@@ -446,6 +448,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
         vcpu_set_wfe_traps(vcpu);
 
         update_steal_time(vcpu);
+
+        if (kvm_arm_is_pvsched_enabled(&vcpu->arch))
+                kvm_update_pvsched_preempted(vcpu, 0);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -459,6 +464,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
         vcpu->cpu = -1;
 
         kvm_arm_set_running_vcpu(NULL);
+
+        if (kvm_arm_is_pvsched_enabled(&vcpu->arch))
+                kvm_update_pvsched_preempted(vcpu, 1);
 }
 
 static void vcpu_power_off(struct kvm_vcpu *vcpu)
......
@@ -14,6 +14,7 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
         u32 func_id = smccc_get_function(vcpu);
         u32 val = SMCCC_RET_NOT_SUPPORTED;
         u32 feature;
+        gpa_t gpa;
 
         switch (func_id) {
         case ARM_SMCCC_VERSION_FUNC_ID:
@@ -48,6 +49,17 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
         case ARM_SMCCC_HV_PV_SCHED_FEATURES:
                 val = kvm_hypercall_pvsched_features(vcpu);
                 break;
+        case ARM_SMCCC_HV_PV_SCHED_IPA_INIT:
+                gpa = smccc_get_arg1(vcpu);
+                if (gpa != GPA_INVALID) {
+                        vcpu->arch.pvsched.base = gpa;
+                        val = SMCCC_RET_SUCCESS;
+                }
+                break;
+        case ARM_SMCCC_HV_PV_SCHED_IPA_RELEASE:
+                vcpu->arch.pvsched.base = GPA_INVALID;
+                val = SMCCC_RET_SUCCESS;
+                break;
         default:
                 return kvm_psci_call(vcpu);
         }
......
@@ -5,9 +5,39 @@
  */
 
 #include <linux/arm-smccc.h>
 #include <linux/kvm_host.h>
 
+#include <asm/pvsched-abi.h>
+
 #include <kvm/arm_hypercalls.h>
 
+void kvm_update_pvsched_preempted(struct kvm_vcpu *vcpu, u32 preempted)
+{
+        __le32 preempted_le;
+        u64 offset;
+        int idx;
+        u64 base = vcpu->arch.pvsched.base;
+        struct kvm *kvm = vcpu->kvm;
+
+        if (base == GPA_INVALID)
+                return;
+
+        preempted_le = cpu_to_le32(preempted);
+
+        /*
+         * This function is called from atomic context, so we need to
+         * disable page faults.
+         */
+        pagefault_disable();
+
+        idx = srcu_read_lock(&kvm->srcu);
+        offset = offsetof(struct pvsched_vcpu_state, preempted);
+        kvm_put_guest(kvm, base + offset, preempted_le, u32);
+        srcu_read_unlock(&kvm->srcu, idx);
+
+        pagefault_enable();
+}
+
 int kvm_hypercall_pvsched_features(struct kvm_vcpu *vcpu)
 {
         u32 feature = smccc_get_arg1(vcpu);
@@ -15,6 +45,8 @@ int kvm_hypercall_pvsched_features(struct kvm_vcpu *vcpu)
         switch (feature) {
         case ARM_SMCCC_HV_PV_SCHED_FEATURES:
+        case ARM_SMCCC_HV_PV_SCHED_IPA_INIT:
+        case ARM_SMCCC_HV_PV_SCHED_IPA_RELEASE:
                 val = SMCCC_RET_SUCCESS;
                 break;
         }
......
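
For context, the preempted field that kvm_update_pvsched_preempted()
writes above is meant to be polled from the guest, typically behind a
vcpu_is_preempted() hook. Again a sketch only, reusing the hypothetical
pvsched_state variable from the example in the commit message:

  /* Hypothetical guest-side reader; not part of this patch. */
  static bool pvsched_vcpu_is_preempted(int cpu)
  {
          struct pvsched_vcpu_state *st = per_cpu_ptr(&pvsched_state, cpu);

          /* The host stores 0 (running) or 1 (preempted), little-endian. */
          return !!le32_to_cpu(READ_ONCE(st->preempted));
  }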