Commit bbcd7df5 authored by Zengruan Ye, committed by Cheng Jian

KVM: arm64: Implement PV_SCHED_FEATURES call

euleros inclusion
category: feature
bugzilla: https://bugzilla.openeuler.org/show_bug.cgi?id=35
CVE: NA

--------------------------------

This provides a mechanism for querying which paravirtualized sched
features are available in this hypervisor.

Add SMCCC-compatible hypercalls for PV sched features:
  PV_SCHED_FEATURES:       0xC5000090
  PV_SCHED_IPA_INIT:       0xC5000091
  PV_SCHED_IPA_RELEASE:    0xC5000092

Also add the header file which defines the ABI for the paravirtualized
sched features we're about to add.
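
As an illustration of this query mechanism (not part of the patch), a guest could probe for PV sched support through the SMCCC 1.1 helpers; the function name below is hypothetical and an HVC conduit to the hypervisor is assumed:

#include <linux/arm-smccc.h>

/*
 * Hypothetical guest-side probe: pass the function ID being queried as
 * the first argument of PV_SCHED_FEATURES and treat SMCCC_RET_SUCCESS
 * in x0 as "supported".
 */
static bool pv_sched_feature_supported(u32 func_id)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_hvc(ARM_SMCCC_HV_PV_SCHED_FEATURES, func_id, &res);

	return res.a0 == SMCCC_RET_SUCCESS;
}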
Signed-off-by: Zengruan Ye <yezengruan@huawei.com>
Reviewed-by: zhanghailiang <zhang.zhanghailiang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Chaochao Xing <xingchaochao@huawei.com>
Reviewed-by: Zengruan Ye <yezengruan@huawei.com>
Reviewed-by: Xiangyou Xie <xiexiangyou@huawei.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
Parent 07a97002
@@ -19,6 +19,7 @@
#ifndef __ARM_KVM_HOST_H__
#define __ARM_KVM_HOST_H__
#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <asm/cputype.h>
@@ -302,6 +303,11 @@ static inline int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
int kvm_perf_init(void);
int kvm_perf_teardown(void);
static inline int kvm_hypercall_pvsched_features(struct kvm_vcpu *vcpu)
{
	return SMCCC_RET_NOT_SUPPORTED;
}
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
......
@@ -430,6 +430,8 @@ void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
int kvm_perf_init(void);
int kvm_perf_teardown(void);
int kvm_hypercall_pvsched_features(struct kvm_vcpu *vcpu);
void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
......
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright(c) 2019 Huawei Technologies Co., Ltd
* Author: Zengruan Ye <yezengruan@huawei.com>
*/
#ifndef __ASM_PVSCHED_ABI_H
#define __ASM_PVSCHED_ABI_H
struct pvsched_vcpu_state {
	__le32 preempted;
	/* Structure must be 64 byte aligned, pad to that size */
	u8 padding[60];
} __packed;
#endif
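
Given the comment above, the shared structure must stay exactly 64 bytes (4 bytes of preempted plus 60 bytes of padding). A compile-time check along these lines could guard that; it is illustrative only and not part of the patch:

/* Illustrative only: catch accidental changes to the shared ABI size. */
_Static_assert(sizeof(struct pvsched_vcpu_state) == 64,
	       "pvsched_vcpu_state must be exactly 64 bytes");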
@@ -16,6 +16,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/e
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arm.o $(KVM)/arm/mmu.o $(KVM)/arm/mmio.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/psci.o $(KVM)/arm/perf.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hypercalls.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/pvsched.o
kvm-$(CONFIG_KVM_ARM_HOST) += inject_fault.o regmap.o va_layout.o
kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
......
@@ -54,6 +54,7 @@
#define ARM_SMCCC_OWNER_SIP 2
#define ARM_SMCCC_OWNER_OEM 3
#define ARM_SMCCC_OWNER_STANDARD 4
#define ARM_SMCCC_OWNER_STANDARD_HYP 5
#define ARM_SMCCC_OWNER_TRUSTED_APP 48
#define ARM_SMCCC_OWNER_TRUSTED_APP_END 49
#define ARM_SMCCC_OWNER_TRUSTED_OS 50
@@ -356,5 +357,24 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
method; \
})
/* Paravirtualised sched calls */
#define ARM_SMCCC_HV_PV_SCHED_FEATURES				\
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,			\
			   ARM_SMCCC_SMC_64,			\
			   ARM_SMCCC_OWNER_STANDARD_HYP,	\
			   0x90)

#define ARM_SMCCC_HV_PV_SCHED_IPA_INIT				\
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,			\
			   ARM_SMCCC_SMC_64,			\
			   ARM_SMCCC_OWNER_STANDARD_HYP,	\
			   0x91)

#define ARM_SMCCC_HV_PV_SCHED_IPA_RELEASE			\
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,			\
			   ARM_SMCCC_SMC_64,			\
			   ARM_SMCCC_OWNER_STANDARD_HYP,	\
			   0x92)
#endif /*__ASSEMBLY__*/
#endif /*__LINUX_ARM_SMCCC_H*/
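
For reference, ARM_SMCCC_CALL_VAL composes the arguments above into the function IDs quoted in the commit message: bit 31 marks a fast call, bit 30 the SMC64 calling convention, bits 29:24 carry the owner ID and the low 16 bits the function number. For PV_SCHED_FEATURES:

/*
 * Illustration of the encoding (not part of the patch):
 *   ARM_SMCCC_FAST_CALL          -> 0x80000000
 *   ARM_SMCCC_SMC_64             -> 0x40000000
 *   ARM_SMCCC_OWNER_STANDARD_HYP -> 0x05000000  (owner 5 << 24)
 *   function number              -> 0x00000090
 *   0x80000000 | 0x40000000 | 0x05000000 | 0x90 == 0xC5000090
 */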
@@ -40,8 +40,14 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
				break;
			}
			break;
		case ARM_SMCCC_HV_PV_SCHED_FEATURES:
			val = SMCCC_RET_SUCCESS;
			break;
		}
		break;
	case ARM_SMCCC_HV_PV_SCHED_FEATURES:
		val = kvm_hypercall_pvsched_features(vcpu);
		break;
	default:
		return kvm_psci_call(vcpu);
	}
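
Not visible in this hunk: after the switch, kvm_hvc_call_handler hands val back to the caller (upstream does this with smccc_set_retval(vcpu, val, 0, 0, 0) followed by return 1), which is how SMCCC_RET_SUCCESS from the new cases reaches the guest's x0.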
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright(c) 2019 Huawei Technologies Co., Ltd
* Author: Zengruan Ye <yezengruan@huawei.com>
*/
#include <linux/arm-smccc.h>
#include <kvm/arm_hypercalls.h>
int kvm_hypercall_pvsched_features(struct kvm_vcpu *vcpu)
{
	u32 feature = smccc_get_arg1(vcpu);
	int val = SMCCC_RET_NOT_SUPPORTED;

	switch (feature) {
	case ARM_SMCCC_HV_PV_SCHED_FEATURES:
		val = SMCCC_RET_SUCCESS;
		break;
	}

	return val;
}
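
Purely as a hedged sketch (not in this patch; the only thing this commit defines for the other two calls is their IDs): once the shared pvsched_vcpu_state handling behind PV_SCHED_IPA_INIT and PV_SCHED_IPA_RELEASE is implemented, the feature query above would presumably grow matching cases, for example:

	switch (feature) {
	case ARM_SMCCC_HV_PV_SCHED_FEATURES:
	case ARM_SMCCC_HV_PV_SCHED_IPA_INIT:
	case ARM_SMCCC_HV_PV_SCHED_IPA_RELEASE:
		/* Hypothetical: advertise these only once their handlers exist. */
		val = SMCCC_RET_SUCCESS;
		break;
	}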